diff --git a/context/context_test.go b/context/context_test.go index e7bf0acc2..2cb54edb8 100644 --- a/context/context_test.go +++ b/context/context_test.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.7 -// +build !go1.7 package context diff --git a/context/ctxhttp/ctxhttp_test.go b/context/ctxhttp/ctxhttp_test.go index 21f7599cc..d585f117f 100644 --- a/context/ctxhttp/ctxhttp_test.go +++ b/context/ctxhttp/ctxhttp_test.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !plan9 -// +build !plan9 package ctxhttp diff --git a/context/go17.go b/context/go17.go index 2cb9c408f..0c1b86793 100644 --- a/context/go17.go +++ b/context/go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.7 -// +build go1.7 package context diff --git a/context/go19.go b/context/go19.go index 64d31ecc3..e31e35a90 100644 --- a/context/go19.go +++ b/context/go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.9 -// +build go1.9 package context diff --git a/context/pre_go17.go b/context/pre_go17.go index 7b6b68511..065ff3dfa 100644 --- a/context/pre_go17.go +++ b/context/pre_go17.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.7 -// +build !go1.7 package context diff --git a/context/pre_go19.go b/context/pre_go19.go index 1f9715341..ec5a63803 100644 --- a/context/pre_go19.go +++ b/context/pre_go19.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.9 -// +build !go1.9 package context diff --git a/dns/dnsmessage/message.go b/dns/dnsmessage/message.go index 1577d4a19..a656efc12 100644 --- a/dns/dnsmessage/message.go +++ b/dns/dnsmessage/message.go @@ -273,7 +273,6 @@ var ( errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)") errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)") errStringTooLong = errors.New("character string exceeds maximum length (255)") - errCompressedSRV = errors.New("compressed name in SRV resource data") ) // Internal constants. @@ -361,6 +360,8 @@ func (m *Header) GoString() string { "Truncated: " + printBool(m.Truncated) + ", " + "RecursionDesired: " + printBool(m.RecursionDesired) + ", " + "RecursionAvailable: " + printBool(m.RecursionAvailable) + ", " + + "AuthenticData: " + printBool(m.AuthenticData) + ", " + + "CheckingDisabled: " + printBool(m.CheckingDisabled) + ", " + "RCode: " + m.RCode.GoString() + "}" } @@ -490,7 +491,7 @@ func (r *Resource) GoString() string { // A ResourceBody is a DNS resource record minus the header. type ResourceBody interface { // pack packs a Resource except for its header. - pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) + pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) // realType returns the actual type of the Resource. This is used to // fill in the header Type field. @@ -501,7 +502,7 @@ type ResourceBody interface { } // pack appends the wire format of the Resource to msg. -func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *Resource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { if r.Body == nil { return msg, errNilResouceBody } @@ -527,22 +528,26 @@ func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff i // When parsing is started, the Header is parsed. 
Next, each Question can be // either parsed or skipped. Alternatively, all Questions can be skipped at // once. When all Questions have been parsed, attempting to parse Questions -// will return (nil, nil) and attempting to skip Questions will return -// (true, nil). After all Questions have been either parsed or skipped, all +// will return the [ErrSectionDone] error. +// After all Questions have been either parsed or skipped, all // Answers, Authorities and Additionals can be either parsed or skipped in the // same way, and each type of Resource must be fully parsed or skipped before // proceeding to the next type of Resource. // +// Parser is safe to copy to preserve the parsing state. +// // Note that there is no requirement to fully skip or parse the message. type Parser struct { msg []byte header header - section section - off int - index int - resHeaderValid bool - resHeader ResourceHeader + section section + off int + index int + resHeaderValid bool + resHeaderOffset int + resHeaderType Type + resHeaderLength uint16 } // Start parses the header and enables the parsing of Questions. 
@@ -593,8 +598,9 @@ func (p *Parser) resource(sec section) (Resource, error) { func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) { if p.resHeaderValid { - return p.resHeader, nil + p.off = p.resHeaderOffset } + if err := p.checkAdvance(sec); err != nil { return ResourceHeader{}, err } @@ -604,14 +610,16 @@ func (p *Parser) resourceHeader(sec section) (ResourceHeader, error) { return ResourceHeader{}, err } p.resHeaderValid = true - p.resHeader = hdr + p.resHeaderOffset = p.off + p.resHeaderType = hdr.Type + p.resHeaderLength = hdr.Length p.off = off return hdr, nil } func (p *Parser) skipResource(sec section) error { - if p.resHeaderValid { - newOff := p.off + int(p.resHeader.Length) + if p.resHeaderValid && p.section == sec { + newOff := p.off + int(p.resHeaderLength) if newOff > len(p.msg) { return errResourceLen } @@ -742,6 +750,9 @@ func (p *Parser) AllAnswers() ([]Resource, error) { } // SkipAnswer skips a single Answer Resource. +// +// It does not perform a complete validation of the resource header, which means +// it may return a nil error when the [AnswerHeader] would actually return an error. func (p *Parser) SkipAnswer() error { return p.skipResource(sectionAnswers) } @@ -792,6 +803,9 @@ func (p *Parser) AllAuthorities() ([]Resource, error) { } // SkipAuthority skips a single Authority Resource. +// +// It does not perform a complete validation of the resource header, which means +// it may return a nil error when the [AuthorityHeader] would actually return an error. func (p *Parser) SkipAuthority() error { return p.skipResource(sectionAuthorities) } @@ -842,6 +856,9 @@ func (p *Parser) AllAdditionals() ([]Resource, error) { } // SkipAdditional skips a single Additional Resource. +// +// It does not perform a complete validation of the resource header, which means +// it may return a nil error when the [AdditionalHeader] would actually return an error. 
func (p *Parser) SkipAdditional() error { return p.skipResource(sectionAdditionals) } @@ -862,14 +879,14 @@ func (p *Parser) SkipAllAdditionals() error { // One of the XXXHeader methods must have been called before calling this // method. func (p *Parser) CNAMEResource() (CNAMEResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeCNAME { + if !p.resHeaderValid || p.resHeaderType != TypeCNAME { return CNAMEResource{}, ErrNotStarted } r, err := unpackCNAMEResource(p.msg, p.off) if err != nil { return CNAMEResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -880,14 +897,14 @@ func (p *Parser) CNAMEResource() (CNAMEResource, error) { // One of the XXXHeader methods must have been called before calling this // method. func (p *Parser) MXResource() (MXResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeMX { + if !p.resHeaderValid || p.resHeaderType != TypeMX { return MXResource{}, ErrNotStarted } r, err := unpackMXResource(p.msg, p.off) if err != nil { return MXResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -898,14 +915,14 @@ func (p *Parser) MXResource() (MXResource, error) { // One of the XXXHeader methods must have been called before calling this // method. func (p *Parser) NSResource() (NSResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeNS { + if !p.resHeaderValid || p.resHeaderType != TypeNS { return NSResource{}, ErrNotStarted } r, err := unpackNSResource(p.msg, p.off) if err != nil { return NSResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -916,14 +933,14 @@ func (p *Parser) NSResource() (NSResource, error) { // One of the XXXHeader methods must have been called before calling this // method. 
func (p *Parser) PTRResource() (PTRResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypePTR { + if !p.resHeaderValid || p.resHeaderType != TypePTR { return PTRResource{}, ErrNotStarted } r, err := unpackPTRResource(p.msg, p.off) if err != nil { return PTRResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -934,14 +951,14 @@ func (p *Parser) PTRResource() (PTRResource, error) { // One of the XXXHeader methods must have been called before calling this // method. func (p *Parser) SOAResource() (SOAResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeSOA { + if !p.resHeaderValid || p.resHeaderType != TypeSOA { return SOAResource{}, ErrNotStarted } r, err := unpackSOAResource(p.msg, p.off) if err != nil { return SOAResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -952,14 +969,14 @@ func (p *Parser) SOAResource() (SOAResource, error) { // One of the XXXHeader methods must have been called before calling this // method. func (p *Parser) TXTResource() (TXTResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeTXT { + if !p.resHeaderValid || p.resHeaderType != TypeTXT { return TXTResource{}, ErrNotStarted } - r, err := unpackTXTResource(p.msg, p.off, p.resHeader.Length) + r, err := unpackTXTResource(p.msg, p.off, p.resHeaderLength) if err != nil { return TXTResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -970,14 +987,14 @@ func (p *Parser) TXTResource() (TXTResource, error) { // One of the XXXHeader methods must have been called before calling this // method. 
func (p *Parser) SRVResource() (SRVResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeSRV { + if !p.resHeaderValid || p.resHeaderType != TypeSRV { return SRVResource{}, ErrNotStarted } r, err := unpackSRVResource(p.msg, p.off) if err != nil { return SRVResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -988,14 +1005,14 @@ func (p *Parser) SRVResource() (SRVResource, error) { // One of the XXXHeader methods must have been called before calling this // method. func (p *Parser) AResource() (AResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeA { + if !p.resHeaderValid || p.resHeaderType != TypeA { return AResource{}, ErrNotStarted } r, err := unpackAResource(p.msg, p.off) if err != nil { return AResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -1006,14 +1023,14 @@ func (p *Parser) AResource() (AResource, error) { // One of the XXXHeader methods must have been called before calling this // method. func (p *Parser) AAAAResource() (AAAAResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeAAAA { + if !p.resHeaderValid || p.resHeaderType != TypeAAAA { return AAAAResource{}, ErrNotStarted } r, err := unpackAAAAResource(p.msg, p.off) if err != nil { return AAAAResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -1024,14 +1041,14 @@ func (p *Parser) AAAAResource() (AAAAResource, error) { // One of the XXXHeader methods must have been called before calling this // method. 
func (p *Parser) OPTResource() (OPTResource, error) { - if !p.resHeaderValid || p.resHeader.Type != TypeOPT { + if !p.resHeaderValid || p.resHeaderType != TypeOPT { return OPTResource{}, ErrNotStarted } - r, err := unpackOPTResource(p.msg, p.off, p.resHeader.Length) + r, err := unpackOPTResource(p.msg, p.off, p.resHeaderLength) if err != nil { return OPTResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -1045,11 +1062,11 @@ func (p *Parser) UnknownResource() (UnknownResource, error) { if !p.resHeaderValid { return UnknownResource{}, ErrNotStarted } - r, err := unpackUnknownResource(p.resHeader.Type, p.msg, p.off, p.resHeader.Length) + r, err := unpackUnknownResource(p.resHeaderType, p.msg, p.off, p.resHeaderLength) if err != nil { return UnknownResource{}, err } - p.off += int(p.resHeader.Length) + p.off += int(p.resHeaderLength) p.resHeaderValid = false p.index++ return r, nil @@ -1120,7 +1137,7 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) { // DNS messages can be a maximum of 512 bytes long. Without compression, // many DNS response messages are over this limit, so enabling // compression will help ensure compliance. - compression := map[string]int{} + compression := map[string]uint16{} for i := range m.Questions { var err error @@ -1211,7 +1228,7 @@ type Builder struct { // compression is a mapping from name suffixes to their starting index // in msg. - compression map[string]int + compression map[string]uint16 } // NewBuilder creates a new builder with compression disabled. @@ -1248,7 +1265,7 @@ func NewBuilder(buf []byte, h Header) Builder { // // Compression should be enabled before any sections are added for best results. 
func (b *Builder) EnableCompression() { - b.compression = map[string]int{} + b.compression = map[string]uint16{} } func (b *Builder) startCheck(s section) error { @@ -1664,7 +1681,7 @@ func (h *ResourceHeader) GoString() string { // pack appends the wire format of the ResourceHeader to oldMsg. // // lenOff is the offset in msg where the Length field was packed. -func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, lenOff int, err error) { +func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]uint16, compressionOff int) (msg []byte, lenOff int, err error) { msg = oldMsg if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil { return oldMsg, 0, &nestedError{"Name", err} @@ -1892,7 +1909,7 @@ func unpackBytes(msg []byte, off int, field []byte) (int, error) { const nonEncodedNameMax = 254 -// A Name is a non-encoded domain name. It is used instead of strings to avoid +// A Name is a non-encoded and non-escaped domain name. It is used instead of strings to avoid // allocations. type Name struct { Data [255]byte @@ -1919,6 +1936,8 @@ func MustNewName(name string) Name { } // String implements fmt.Stringer.String. +// +// Note: characters inside the labels are not escaped in any way. func (n Name) String() string { return string(n.Data[:n.Length]) } @@ -1935,7 +1954,7 @@ func (n *Name) GoString() string { // // The compression map will be updated with new domain suffixes. If compression // is nil, compression will not be used. -func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (n *Name) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { oldMsg := msg if n.Length > nonEncodedNameMax { @@ -1952,6 +1971,8 @@ func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) return append(msg, 0), nil } + var nameAsStr string + // Emit sequence of counted strings, chopping at dots. 
for i, begin := 0, 0; i < int(n.Length); i++ { // Check for the end of the segment. @@ -1982,16 +2003,22 @@ func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) // segment. A pointer is two bytes with the two most significant // bits set to 1 to indicate that it is a pointer. if (i == 0 || n.Data[i-1] == '.') && compression != nil { - if ptr, ok := compression[string(n.Data[i:])]; ok { + if ptr, ok := compression[string(n.Data[i:n.Length])]; ok { // Hit. Emit a pointer instead of the rest of // the domain. return append(msg, byte(ptr>>8|0xC0), byte(ptr)), nil } // Miss. Add the suffix to the compression table if the - // offset can be stored in the available 14 bytes. - if len(msg) <= int(^uint16(0)>>2) { - compression[string(n.Data[i:])] = len(msg) - compressionOff + // offset can be stored in the available 14 bits. + newPtr := len(msg) - compressionOff + if newPtr <= int(^uint16(0)>>2) { + if nameAsStr == "" { + // allocate n.Data on the heap once, to avoid allocating it + // multiple times (for next labels). + nameAsStr = string(n.Data[:n.Length]) + } + compression[nameAsStr[i:]] = uint16(newPtr) } } } @@ -2000,10 +2027,6 @@ func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) // unpack unpacks a domain name. func (n *Name) unpack(msg []byte, off int) (int, error) { - return n.unpackCompressed(msg, off, true /* allowCompression */) -} - -func (n *Name) unpackCompressed(msg []byte, off int, allowCompression bool) (int, error) { // currOff is the current working offset. currOff := off @@ -2048,9 +2071,6 @@ Loop: name = append(name, '.') currOff = endOff case 0xC0: // Pointer - if !allowCompression { - return off, errCompressedSRV - } if currOff >= len(msg) { return off, errInvalidPtr } @@ -2131,7 +2151,7 @@ type Question struct { } // pack appends the wire format of the Question to msg. 
-func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (q *Question) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { msg, err := q.Name.pack(msg, compression, compressionOff) if err != nil { return msg, &nestedError{"Name", err} @@ -2227,7 +2247,7 @@ func (r *CNAMEResource) realType() Type { } // pack appends the wire format of the CNAMEResource to msg. -func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *CNAMEResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { return r.CNAME.pack(msg, compression, compressionOff) } @@ -2255,7 +2275,7 @@ func (r *MXResource) realType() Type { } // pack appends the wire format of the MXResource to msg. -func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *MXResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Pref) msg, err := r.MX.pack(msg, compression, compressionOff) @@ -2294,7 +2314,7 @@ func (r *NSResource) realType() Type { } // pack appends the wire format of the NSResource to msg. -func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *NSResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { return r.NS.pack(msg, compression, compressionOff) } @@ -2321,7 +2341,7 @@ func (r *PTRResource) realType() Type { } // pack appends the wire format of the PTRResource to msg. 
-func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *PTRResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { return r.PTR.pack(msg, compression, compressionOff) } @@ -2358,7 +2378,7 @@ func (r *SOAResource) realType() Type { } // pack appends the wire format of the SOAResource to msg. -func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *SOAResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { oldMsg := msg msg, err := r.NS.pack(msg, compression, compressionOff) if err != nil { @@ -2430,7 +2450,7 @@ func (r *TXTResource) realType() Type { } // pack appends the wire format of the TXTResource to msg. -func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *TXTResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { oldMsg := msg for _, s := range r.TXT { var err error @@ -2486,7 +2506,7 @@ func (r *SRVResource) realType() Type { } // pack appends the wire format of the SRVResource to msg. 
-func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *SRVResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Priority) msg = packUint16(msg, r.Weight) @@ -2521,7 +2541,7 @@ func unpackSRVResource(msg []byte, off int) (SRVResource, error) { return SRVResource{}, &nestedError{"Port", err} } var target Name - if _, err := target.unpackCompressed(msg, off, false /* allowCompression */); err != nil { + if _, err := target.unpack(msg, off); err != nil { return SRVResource{}, &nestedError{"Target", err} } return SRVResource{priority, weight, port, target}, nil @@ -2537,7 +2557,7 @@ func (r *AResource) realType() Type { } // pack appends the wire format of the AResource to msg. -func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *AResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { return packBytes(msg, r.A[:]), nil } @@ -2571,7 +2591,7 @@ func (r *AAAAResource) GoString() string { } // pack appends the wire format of the AAAAResource to msg. -func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *AAAAResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { return packBytes(msg, r.AAAA[:]), nil } @@ -2611,7 +2631,7 @@ func (r *OPTResource) realType() Type { return TypeOPT } -func (r *OPTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *OPTResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { for _, opt := range r.Options { msg = packUint16(msg, opt.Code) l := uint16(len(opt.Data)) @@ -2669,7 +2689,7 @@ func (r *UnknownResource) realType() Type { } // pack appends the wire format of the UnknownResource to msg. 
-func (r *UnknownResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { +func (r *UnknownResource) pack(msg []byte, compression map[string]uint16, compressionOff int) ([]byte, error) { return packBytes(msg, r.Data[:]), nil } diff --git a/dns/dnsmessage/message_test.go b/dns/dnsmessage/message_test.go index ce2716e42..255530598 100644 --- a/dns/dnsmessage/message_test.go +++ b/dns/dnsmessage/message_test.go @@ -164,7 +164,7 @@ func TestQuestionPackUnpack(t *testing.T) { Type: TypeA, Class: ClassINET, } - buf, err := want.pack(make([]byte, 1, 50), map[string]int{}, 1) + buf, err := want.pack(make([]byte, 1, 50), map[string]uint16{}, 1) if err != nil { t.Fatal("Question.pack() =", err) } @@ -243,7 +243,7 @@ func TestNamePackUnpack(t *testing.T) { for _, test := range tests { in := MustNewName(test.in) - buf, err := in.pack(make([]byte, 0, 30), map[string]int{}, 0) + buf, err := in.pack(make([]byte, 0, 30), map[string]uint16{}, 0) if err != test.err { t.Errorf("got %q.pack() = %v, want = %v", test.in, err, test.err) continue @@ -303,28 +303,6 @@ func TestNameUnpackTooLongName(t *testing.T) { } } -func TestIncompressibleName(t *testing.T) { - name := MustNewName("example.com.") - compression := map[string]int{} - buf, err := name.pack(make([]byte, 0, 100), compression, 0) - if err != nil { - t.Fatal("first Name.pack() =", err) - } - buf, err = name.pack(buf, compression, 0) - if err != nil { - t.Fatal("second Name.pack() =", err) - } - var n1 Name - off, err := n1.unpackCompressed(buf, 0, false /* allowCompression */) - if err != nil { - t.Fatal("unpacking incompressible name without pointers failed:", err) - } - var n2 Name - if _, err := n2.unpackCompressed(buf, off, false /* allowCompression */); err != errCompressedSRV { - t.Errorf("unpacking compressed incompressible name with pointers: got %v, want = %v", err, errCompressedSRV) - } -} - func checkErrorPrefix(err error, prefix string) bool { e, ok := err.(*nestedError) return 
ok && e.s == prefix @@ -623,7 +601,7 @@ func TestVeryLongTxt(t *testing.T) { strings.Repeat(".", 255), }}, } - buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}, 0) + buf, err := want.pack(make([]byte, 0, 8000), map[string]uint16{}, 0) if err != nil { t.Fatal("Resource.pack() =", err) } @@ -647,7 +625,7 @@ func TestVeryLongTxt(t *testing.T) { func TestTooLongTxt(t *testing.T) { rb := TXTResource{[]string{strings.Repeat(".", 256)}} - if _, err := rb.pack(make([]byte, 0, 8000), map[string]int{}, 0); err != errStringTooLong { + if _, err := rb.pack(make([]byte, 0, 8000), map[string]uint16{}, 0); err != errStringTooLong { t.Errorf("packing TXTResource with 256 character string: got err = %v, want = %v", err, errStringTooLong) } } @@ -1185,8 +1163,7 @@ func TestGoString(t *testing.T) { t.Error("Message.GoString lost information or largeTestMsg changed: msg != largeTestMsg()") } got := msg.GoString() - - want := `dnsmessage.Message{Header: dnsmessage.Header{ID: 0, Response: true, OpCode: 0, Authoritative: true, Truncated: false, RecursionDesired: false, RecursionAvailable: false, RCode: dnsmessage.RCodeSuccess}, Questions: []dnsmessage.Question{dnsmessage.Question{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET}}, Answers: []dnsmessage.Resource{dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 2}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeAAAA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: 
&dnsmessage.AAAAResource{AAAA: [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeCNAME, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.CNAMEResource{CNAME: dnsmessage.MustNewName("alias.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeSOA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.SOAResource{NS: dnsmessage.MustNewName("ns1.example.com."), MBox: dnsmessage.MustNewName("mb.example.com."), Serial: 1, Refresh: 2, Retry: 3, Expire: 4, MinTTL: 5}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypePTR, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.PTRResource{PTR: dnsmessage.MustNewName("ptr.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeMX, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.MXResource{Pref: 7, MX: dnsmessage.MustNewName("mx.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeSRV, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.SRVResource{Priority: 8, Weight: 9, Port: 11, Target: dnsmessage.MustNewName("srv.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: 65362, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.UnknownResource{Type: 65362, Data: []byte{42, 0, 43, 44}}}}, Authorities: []dnsmessage.Resource{dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeNS, Class: 
dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.NSResource{NS: dnsmessage.MustNewName("ns1.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeNS, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.NSResource{NS: dnsmessage.MustNewName("ns2.example.com.")}}}, Additionals: []dnsmessage.Resource{dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeTXT, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.TXTResource{TXT: []string{"So Long\x2c and Thanks for All the Fish"}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeTXT, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.TXTResource{TXT: []string{"Hamster Huey and the Gooey Kablooie"}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("."), Type: dnsmessage.TypeOPT, Class: 4096, TTL: 4261412864, Length: 0}, Body: &dnsmessage.OPTResource{Options: []dnsmessage.Option{dnsmessage.Option{Code: 10, Data: []byte{1, 35, 69, 103, 137, 171, 205, 239}}}}}}}` + want := `dnsmessage.Message{Header: dnsmessage.Header{ID: 0, Response: true, OpCode: 0, Authoritative: true, Truncated: false, RecursionDesired: false, RecursionAvailable: false, AuthenticData: false, CheckingDisabled: false, RCode: dnsmessage.RCodeSuccess}, Questions: []dnsmessage.Question{dnsmessage.Question{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET}}, Answers: []dnsmessage.Resource{dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}}}, dnsmessage.Resource{Header: 
dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 2}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeAAAA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.AAAAResource{AAAA: [16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeCNAME, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.CNAMEResource{CNAME: dnsmessage.MustNewName("alias.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeSOA, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.SOAResource{NS: dnsmessage.MustNewName("ns1.example.com."), MBox: dnsmessage.MustNewName("mb.example.com."), Serial: 1, Refresh: 2, Retry: 3, Expire: 4, MinTTL: 5}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypePTR, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.PTRResource{PTR: dnsmessage.MustNewName("ptr.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeMX, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.MXResource{Pref: 7, MX: dnsmessage.MustNewName("mx.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeSRV, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.SRVResource{Priority: 8, Weight: 9, Port: 11, Target: dnsmessage.MustNewName("srv.example.com.")}}, dnsmessage.Resource{Header: 
dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: 65362, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.UnknownResource{Type: 65362, Data: []byte{42, 0, 43, 44}}}}, Authorities: []dnsmessage.Resource{dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeNS, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.NSResource{NS: dnsmessage.MustNewName("ns1.example.com.")}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeNS, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.NSResource{NS: dnsmessage.MustNewName("ns2.example.com.")}}}, Additionals: []dnsmessage.Resource{dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeTXT, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.TXTResource{TXT: []string{"So Long\x2c and Thanks for All the Fish"}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("foo.bar.example.com."), Type: dnsmessage.TypeTXT, Class: dnsmessage.ClassINET, TTL: 0, Length: 0}, Body: &dnsmessage.TXTResource{TXT: []string{"Hamster Huey and the Gooey Kablooie"}}}, dnsmessage.Resource{Header: dnsmessage.ResourceHeader{Name: dnsmessage.MustNewName("."), Type: dnsmessage.TypeOPT, Class: 4096, TTL: 4261412864, Length: 0}, Body: &dnsmessage.OPTResource{Options: []dnsmessage.Option{dnsmessage.Option{Code: 10, Data: []byte{1, 35, 69, 103, 137, 171, 205, 239}}}}}}}` if got != want { t.Errorf("got msg1.GoString() = %s\nwant = %s", got, want) @@ -1643,3 +1620,235 @@ func TestNoFmt(t *testing.T) { } } } + +func FuzzUnpackPack(f *testing.F) { + for _, msg := range []Message{smallTestMsg(), largeTestMsg()} { + bytes, _ := msg.Pack() + f.Add(bytes) + } + + f.Fuzz(func(t *testing.T, msg []byte) { + var m 
Message + if err := m.Unpack(msg); err != nil { + return + } + + msgPacked, err := m.Pack() + if err != nil { + t.Fatalf("failed to pack message that was successfully unpacked: %v", err) + } + + var m2 Message + if err := m2.Unpack(msgPacked); err != nil { + t.Fatalf("failed to unpack message that was successfully packed: %v", err) + } + + if !reflect.DeepEqual(m, m2) { + t.Fatal("unpack(msg) is not deep equal to unpack(pack(unpack(msg)))") + } + }) +} + +func TestParseResourceHeaderMultipleTimes(t *testing.T) { + msg := Message{ + Header: Header{Response: true, Authoritative: true}, + Answers: []Resource{ + { + ResourceHeader{ + Name: MustNewName("go.dev."), + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + Authorities: []Resource{ + { + ResourceHeader{ + Name: MustNewName("go.dev."), + Type: TypeA, + Class: ClassINET, + }, + &AResource{[4]byte{127, 0, 0, 1}}, + }, + }, + } + + raw, err := msg.Pack() + if err != nil { + t.Fatal(err) + } + + var p Parser + + if _, err := p.Start(raw); err != nil { + t.Fatal(err) + } + + if err := p.SkipAllQuestions(); err != nil { + t.Fatal(err) + } + + hdr1, err := p.AnswerHeader() + if err != nil { + t.Fatal(err) + } + + hdr2, err := p.AnswerHeader() + if err != nil { + t.Fatal(err) + } + + if hdr1 != hdr2 { + t.Fatal("AnswerHeader called multiple times without parsing the RData returned different headers") + } + + if _, err := p.AResource(); err != nil { + t.Fatal(err) + } + + if _, err := p.AnswerHeader(); err != ErrSectionDone { + t.Fatalf("unexpected error: %v, want: %v", err, ErrSectionDone) + } + + hdr3, err := p.AuthorityHeader() + if err != nil { + t.Fatal(err) + } + + hdr4, err := p.AuthorityHeader() + if err != nil { + t.Fatal(err) + } + + if hdr3 != hdr4 { + t.Fatal("AuthorityHeader called multiple times without parsing the RData returned different headers") + } + + if _, err := p.AResource(); err != nil { + t.Fatal(err) + } + + if _, err := p.AuthorityHeader(); err != 
ErrSectionDone { + t.Fatalf("unexpected error: %v, want: %v", err, ErrSectionDone) + } +} + +func TestParseDifferentResourceHeadersWithoutParsingRData(t *testing.T) { + msg := smallTestMsg() + raw, err := msg.Pack() + if err != nil { + t.Fatal(err) + } + + var p Parser + if _, err := p.Start(raw); err != nil { + t.Fatal(err) + } + + if err := p.SkipAllQuestions(); err != nil { + t.Fatal(err) + } + + if _, err := p.AnswerHeader(); err != nil { + t.Fatal(err) + } + + if _, err := p.AdditionalHeader(); err == nil { + t.Errorf("p.AdditionalHeader() unexpected success") + } + + if _, err := p.AuthorityHeader(); err == nil { + t.Errorf("p.AuthorityHeader() unexpected success") + } +} + +func TestParseWrongSection(t *testing.T) { + msg := smallTestMsg() + raw, err := msg.Pack() + if err != nil { + t.Fatal(err) + } + + var p Parser + if _, err := p.Start(raw); err != nil { + t.Fatal(err) + } + + if err := p.SkipAllQuestions(); err != nil { + t.Fatalf("p.SkipAllQuestions() = %v", err) + } + if _, err := p.AnswerHeader(); err != nil { + t.Fatalf("p.AnswerHeader() = %v", err) + } + if _, err := p.AuthorityHeader(); err == nil { + t.Fatalf("p.AuthorityHeader(): unexpected success in Answer section") + } + if err := p.SkipAuthority(); err == nil { + t.Fatalf("p.SkipAuthority(): unexpected success in Answer section") + } + if err := p.SkipAllAuthorities(); err == nil { + t.Fatalf("p.SkipAllAuthorities(): unexpected success in Answer section") + } +} + +func TestBuilderNameCompressionWithNonZeroedName(t *testing.T) { + b := NewBuilder(nil, Header{}) + b.EnableCompression() + if err := b.StartQuestions(); err != nil { + t.Fatalf("b.StartQuestions() unexpected error: %v", err) + } + + name := MustNewName("go.dev.") + if err := b.Question(Question{Name: name}); err != nil { + t.Fatalf("b.Question() unexpected error: %v", err) + } + + // Character that is not part of the name (name.Data[:name.Length]), + // shouldn't affect the compression algorithm. 
+ name.Data[name.Length] = '1' + if err := b.Question(Question{Name: name}); err != nil { + t.Fatalf("b.Question() unexpected error: %v", err) + } + + msg, err := b.Finish() + if err != nil { + t.Fatalf("b.Finish() unexpected error: %v", err) + } + + expect := []byte{ + 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, // header + 2, 'g', 'o', 3, 'd', 'e', 'v', 0, 0, 0, 0, 0, // question 1 + 0xC0, 12, 0, 0, 0, 0, // question 2 + } + if !bytes.Equal(msg, expect) { + t.Fatalf("b.Finish() = %v, want: %v", msg, expect) + } +} + +func TestBuilderCompressionInAppendMode(t *testing.T) { + maxPtr := int(^uint16(0) >> 2) + b := NewBuilder(make([]byte, maxPtr, maxPtr+512), Header{}) + b.EnableCompression() + if err := b.StartQuestions(); err != nil { + t.Fatalf("b.StartQuestions() unexpected error: %v", err) + } + if err := b.Question(Question{Name: MustNewName("go.dev.")}); err != nil { + t.Fatalf("b.Question() unexpected error: %v", err) + } + if err := b.Question(Question{Name: MustNewName("go.dev.")}); err != nil { + t.Fatalf("b.Question() unexpected error: %v", err) + } + msg, err := b.Finish() + if err != nil { + t.Fatalf("b.Finish() unexpected error: %v", err) + } + expect := []byte{ + 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, // header + 2, 'g', 'o', 3, 'd', 'e', 'v', 0, 0, 0, 0, 0, // question 1 + 0xC0, 12, 0, 0, 0, 0, // question 2 + } + if !bytes.Equal(msg[maxPtr:], expect) { + t.Fatalf("msg[maxPtr:] = %v, want: %v", msg[maxPtr:], expect) + } +} diff --git a/go.mod b/go.mod index 018af6f4e..36207106d 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,10 @@ module golang.org/x/net -go 1.17 +go 1.18 require ( - golang.org/x/crypto v0.11.0 - golang.org/x/sys v0.10.0 - golang.org/x/term v0.10.0 - golang.org/x/text v0.11.0 + golang.org/x/crypto v0.21.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 + golang.org/x/text v0.14.0 ) diff --git a/go.sum b/go.sum index a9f84de71..69fb10498 100644 --- a/go.sum +++ b/go.sum @@ -1,42 +1,8 @@ -github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod 
h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= diff --git a/html/atom/gen.go b/html/atom/gen.go index 5b0aaf737..5d85c604d 100644 --- a/html/atom/gen.go +++ b/html/atom/gen.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ignore -// +build ignore //go:generate go run gen.go //go:generate go run gen.go -test diff --git a/html/render.go b/html/render.go index 8b2803190..e8c123345 100644 --- a/html/render.go +++ b/html/render.go @@ -194,9 +194,8 @@ func render1(w writer, n *Node) error { } } - // Render any child nodes. - switch n.Data { - case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + // Render any child nodes + if childTextNodesAreLiteral(n) { for c := n.FirstChild; c != nil; c = c.NextSibling { if c.Type == TextNode { if _, err := w.WriteString(c.Data); err != nil { @@ -213,7 +212,7 @@ func render1(w writer, n *Node) error { // last element in the file, with no closing tag. return plaintextAbort } - default: + } else { for c := n.FirstChild; c != nil; c = c.NextSibling { if err := render1(w, c); err != nil { return err @@ -231,6 +230,27 @@ func render1(w writer, n *Node) error { return w.WriteByte('>') } +func childTextNodesAreLiteral(n *Node) bool { + // Per WHATWG HTML 13.3, if the parent of the current node is a style, + // script, xmp, iframe, noembed, noframes, or plaintext element, and the + // current node is a text node, append the value of the node's data + // literally. 
The specification is not explicit about it, but we only + // enforce this if we are in the HTML namespace (i.e. when the namespace is + // ""). + // NOTE: we also always include noscript elements, although the + // specification states that they should only be rendered as such if + // scripting is enabled for the node (which is not something we track). + if n.Namespace != "" { + return false + } + switch n.Data { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + return true + default: + return false + } +} + // writeQuoted writes s to w surrounded by quotes. Normally it will use double // quotes, but if s contains a double quote, it will use single quotes. // It is used for writing the identifiers in a doctype declaration. diff --git a/html/render_test.go b/html/render_test.go index 08e592be2..22d08641a 100644 --- a/html/render_test.go +++ b/html/render_test.go @@ -6,6 +6,8 @@ package html import ( "bytes" + "fmt" + "strings" "testing" ) @@ -108,16 +110,16 @@ func TestRenderer(t *testing.T) { // just commentary. The "0:" prefixes are for easy cross-reference with // the nodes array. treeAsText := [...]string{ - 0: ``, - 1: `.
`, - 2: `. `, - 3: `. . "0<1"`, - 4: `. .`, - 5: `. . . "2"`, - 6: `. . . `, - 7: `. . . . "3"`, - 8: `. . . `, - 9: `. . . . "&4"`, + 0: ``, + 1: `. `, + 2: `. `, + 3: `. . "0<1"`, + 4: `. .
`,
+ 5: `. . . "2"`,
+ 6: `. . . `,
+ 7: `. . . . "3"`,
+ 8: `. . . `,
+ 9: `. . . . "&4"`,
10: `. . "5"`,
11: `. . `,
` `,
},
+ {
+ "forward slash before attribute name",
+ ` `,
+ ` `,
+ },
+ {
+ "forward slash before attribute name with spaces around",
+ ` `,
+ ` `,
+ },
+ {
+ "forward slash after attribute name followed by a character",
+ ` `,
+ ` `,
+ },
}
func TestTokenizer(t *testing.T) {
diff --git a/http/httpproxy/go19_test.go b/http/httpproxy/go19_test.go
index 5f6e3d7ff..5fca5ac45 100644
--- a/http/httpproxy/go19_test.go
+++ b/http/httpproxy/go19_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.9
-// +build go1.9
package httpproxy_test
diff --git a/http/httpproxy/proxy.go b/http/httpproxy/proxy.go
index c3bd9a1ee..6404aaf15 100644
--- a/http/httpproxy/proxy.go
+++ b/http/httpproxy/proxy.go
@@ -149,10 +149,7 @@ func parseProxy(proxy string) (*url.URL, error) {
}
proxyURL, err := url.Parse(proxy)
- if err != nil ||
- (proxyURL.Scheme != "http" &&
- proxyURL.Scheme != "https" &&
- proxyURL.Scheme != "socks5") {
+ if err != nil || proxyURL.Scheme == "" || proxyURL.Host == "" {
// proxy was bogus. Try prepending "http://" to it and
// see if that parses correctly. If not, we fall
// through and complain about the original one.
diff --git a/http/httpproxy/proxy_test.go b/http/httpproxy/proxy_test.go
index d76373295..790afdab7 100644
--- a/http/httpproxy/proxy_test.go
+++ b/http/httpproxy/proxy_test.go
@@ -68,6 +68,12 @@ var proxyForURLTests = []proxyForURLTest{{
HTTPProxy: "cache.corp.example.com",
},
want: "http://cache.corp.example.com",
+}, {
+ // single label domain is recognized as scheme by url.Parse
+ cfg: httpproxy.Config{
+ HTTPProxy: "localhost",
+ },
+ want: "http://localhost",
}, {
cfg: httpproxy.Config{
HTTPProxy: "https://cache.corp.example.com",
@@ -88,6 +94,12 @@ var proxyForURLTests = []proxyForURLTest{{
HTTPProxy: "socks5://127.0.0.1",
},
want: "socks5://127.0.0.1",
+}, {
+ // Preserve unknown schemes.
+ cfg: httpproxy.Config{
+ HTTPProxy: "foo://host",
+ },
+ want: "foo://host",
}, {
// Don't use secure for http
cfg: httpproxy.Config{
diff --git a/http2/Dockerfile b/http2/Dockerfile
deleted file mode 100644
index 851224595..000000000
--- a/http2/Dockerfile
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# This Dockerfile builds a recent curl with HTTP/2 client support, using
-# a recent nghttp2 build.
-#
-# See the Makefile for how to tag it. If Docker and that image is found, the
-# Go tests use this curl binary for integration tests.
-#
-
-FROM ubuntu:trusty
-
-RUN apt-get update && \
- apt-get upgrade -y && \
- apt-get install -y git-core build-essential wget
-
-RUN apt-get install -y --no-install-recommends \
- autotools-dev libtool pkg-config zlib1g-dev \
- libcunit1-dev libssl-dev libxml2-dev libevent-dev \
- automake autoconf
-
-# The list of packages nghttp2 recommends for h2load:
-RUN apt-get install -y --no-install-recommends make binutils \
- autoconf automake autotools-dev \
- libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
- libev-dev libevent-dev libjansson-dev libjemalloc-dev \
- cython python3.4-dev python-setuptools
-
-# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
-ENV NGHTTP2_VER 895da9a
-RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
-
-WORKDIR /root/nghttp2
-RUN git reset --hard $NGHTTP2_VER
-RUN autoreconf -i
-RUN automake
-RUN autoconf
-RUN ./configure
-RUN make
-RUN make install
-
-WORKDIR /root
-RUN wget https://curl.se/download/curl-7.45.0.tar.gz
-RUN tar -zxvf curl-7.45.0.tar.gz
-WORKDIR /root/curl-7.45.0
-RUN ./configure --with-ssl --with-nghttp2=/usr/local
-RUN make
-RUN make install
-RUN ldconfig
-
-CMD ["-h"]
-ENTRYPOINT ["/usr/local/bin/curl"]
-
diff --git a/http2/Makefile b/http2/Makefile
deleted file mode 100644
index 55fd826f7..000000000
--- a/http2/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-curlimage:
- docker build -t gohttp2/curl .
-
diff --git a/http2/clientconn_test.go b/http2/clientconn_test.go
new file mode 100644
index 000000000..4237b1436
--- /dev/null
+++ b/http2/clientconn_test.go
@@ -0,0 +1,829 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Infrastructure for testing ClientConn.RoundTrip.
+// Put actual tests in transport_test.go.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "reflect"
+ "slices"
+ "testing"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+// TestTestClientConn demonstrates usage of testClientConn.
+func TestTestClientConn(t *testing.T) {
+ // newTestClientConn creates a *ClientConn and surrounding test infrastructure.
+ tc := newTestClientConn(t)
+
+ // tc.greet reads the client's initial SETTINGS and WINDOW_UPDATE frames,
+ // and sends a SETTINGS frame to the client.
+ //
+ // Additional settings may be provided as optional parameters to greet.
+ tc.greet()
+
+ // Request bodies must either be constant (bytes.Buffer, strings.Reader)
+ // or created with newRequestBody.
+ body := tc.newRequestBody()
+ body.writeBytes(10) // 10 arbitrary bytes...
+ body.closeWithError(io.EOF) // ...followed by EOF.
+
+ // tc.roundTrip calls RoundTrip, but does not wait for it to return.
+ // It returns a testRoundTrip.
+ req, _ := http.NewRequest("PUT", "https://dummy.tld/", body)
+ rt := tc.roundTrip(req)
+
+ // tc has a number of methods to check for expected frames sent.
+ // Here, we look for headers and the request body.
+ tc.wantHeaders(wantHeader{
+ streamID: rt.streamID(),
+ endStream: false,
+ header: http.Header{
+ ":authority": []string{"dummy.tld"},
+ ":method": []string{"PUT"},
+ ":path": []string{"/"},
+ },
+ })
+ // Expect 10 bytes of request body in DATA frames.
+ tc.wantData(wantData{
+ streamID: rt.streamID(),
+ endStream: true,
+ size: 10,
+ })
+
+ // tc.writeHeaders sends a HEADERS frame back to the client.
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+
+ // Now that we've received headers, RoundTrip has finished.
+ // testRoundTrip has various methods to examine the response,
+ // or to fetch the response and/or error returned by RoundTrip.
+ rt.wantStatus(200)
+ rt.wantBody(nil)
+}
+
+// A testClientConn allows testing ClientConn.RoundTrip against a fake server.
+//
+// A test using testClientConn consists of:
+// - actions on the client (calling RoundTrip, making data available to Request.Body);
+// - validation of frames sent by the client to the server; and
+// - providing frames from the server to the client.
+//
+// testClientConn manages synchronization, so tests can generally be written as
+// a linear sequence of actions and validations without additional synchronization.
+type testClientConn struct {
+ t *testing.T
+
+ tr *Transport
+ fr *Framer
+ cc *ClientConn
+ hooks *testSyncHooks
+
+ encbuf bytes.Buffer
+ enc *hpack.Encoder
+
+ roundtrips []*testRoundTrip
+
+ rerr error // returned by Read
+ netConnClosed bool // set when the ClientConn closes the net.Conn
+ rbuf bytes.Buffer // sent to the test conn
+ wbuf bytes.Buffer // sent by the test conn
+}
+
+func newTestClientConnFromClientConn(t *testing.T, cc *ClientConn) *testClientConn {
+ tc := &testClientConn{
+ t: t,
+ tr: cc.t,
+ cc: cc,
+ hooks: cc.t.syncHooks,
+ }
+ cc.tconn = (*testClientConnNetConn)(tc)
+ tc.enc = hpack.NewEncoder(&tc.encbuf)
+ tc.fr = NewFramer(&tc.rbuf, &tc.wbuf)
+ tc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ tc.fr.SetMaxReadFrameSize(10 << 20)
+ t.Cleanup(func() {
+ tc.sync()
+ if tc.rerr == nil {
+ tc.rerr = io.EOF
+ }
+ tc.sync()
+ })
+ return tc
+}
+
+func (tc *testClientConn) readClientPreface() {
+ tc.t.Helper()
+ // Read the client's HTTP/2 preface, sent prior to any HTTP/2 frames.
+ buf := make([]byte, len(clientPreface))
+ if _, err := io.ReadFull(&tc.wbuf, buf); err != nil {
+ tc.t.Fatalf("reading preface: %v", err)
+ }
+ if !bytes.Equal(buf, clientPreface) {
+ tc.t.Fatalf("client preface: %q, want %q", buf, clientPreface)
+ }
+}
+
+func newTestClientConn(t *testing.T, opts ...func(*Transport)) *testClientConn {
+ t.Helper()
+
+ tt := newTestTransport(t, opts...)
+ const singleUse = false
+ _, err := tt.tr.newClientConn(nil, singleUse, tt.tr.syncHooks)
+ if err != nil {
+ t.Fatalf("newClientConn: %v", err)
+ }
+
+ return tt.getConn()
+}
+
+// sync waits for the ClientConn under test to reach a stable state,
+// with all goroutines blocked on some input.
+func (tc *testClientConn) sync() {
+ tc.hooks.waitInactive()
+}
+
+// advance advances synthetic time by a duration.
+func (tc *testClientConn) advance(d time.Duration) {
+ tc.hooks.advance(d)
+ tc.sync()
+}
+
+// hasFrame reports whether a frame is available to be read.
+func (tc *testClientConn) hasFrame() bool {
+ return tc.wbuf.Len() > 0
+}
+
+// readFrame reads the next frame from the conn.
+func (tc *testClientConn) readFrame() Frame {
+ if tc.wbuf.Len() == 0 {
+ return nil
+ }
+ fr, err := tc.fr.ReadFrame()
+ if err != nil {
+ return nil
+ }
+ return fr
+}
+
+// testClientConnReadFrame reads a frame of a specific type from the conn.
+func testClientConnReadFrame[T any](tc *testClientConn) T {
+ tc.t.Helper()
+ var v T
+ fr := tc.readFrame()
+ if fr == nil {
+ tc.t.Fatalf("got no frame, want frame %T", v)
+ }
+ v, ok := fr.(T)
+ if !ok {
+ tc.t.Fatalf("got frame %T, want %T", fr, v)
+ }
+ return v
+}
+
+// wantFrameType reads the next frame from the conn.
+// It produces an error if the frame type is not the expected value.
+func (tc *testClientConn) wantFrameType(want FrameType) {
+ tc.t.Helper()
+ fr := tc.readFrame()
+ if fr == nil {
+ tc.t.Fatalf("got no frame, want frame %v", want)
+ }
+ if got := fr.Header().Type; got != want {
+ tc.t.Fatalf("got frame %v, want %v", got, want)
+ }
+}
+
+// wantUnorderedFrames reads frames from the conn until every condition in want has been satisfied.
+//
+// want is a list of func(*SomeFrame) bool.
+// wantUnorderedFrames will call each func with frames of the appropriate type
+// until the func returns true.
+// It calls t.Fatal if an unexpected frame is received (no func has that frame type,
+// or all funcs with that type have returned true), or if the conn runs out of frames
+// with unsatisfied funcs.
+//
+// Example:
+//
+// // Read a SETTINGS frame, and any number of DATA frames for a stream.
+// // The SETTINGS frame may appear anywhere in the sequence.
+// // The last DATA frame must indicate the end of the stream.
+// tc.wantUnorderedFrames(
+// func(f *SettingsFrame) bool {
+// return true
+// },
+// func(f *DataFrame) bool {
+// return f.StreamEnded()
+// },
+// )
+func (tc *testClientConn) wantUnorderedFrames(want ...any) {
+ tc.t.Helper()
+ want = slices.Clone(want)
+ seen := 0
+frame:
+ for seen < len(want) && !tc.t.Failed() {
+ fr := tc.readFrame()
+ if fr == nil {
+ break
+ }
+ for i, f := range want {
+ if f == nil {
+ continue
+ }
+ typ := reflect.TypeOf(f)
+ if typ.Kind() != reflect.Func ||
+ typ.NumIn() != 1 ||
+ typ.NumOut() != 1 ||
+ typ.Out(0) != reflect.TypeOf(true) {
+ tc.t.Fatalf("expected func(*SomeFrame) bool, got %T", f)
+ }
+ if typ.In(0) == reflect.TypeOf(fr) {
+ out := reflect.ValueOf(f).Call([]reflect.Value{reflect.ValueOf(fr)})
+ if out[0].Bool() {
+ want[i] = nil
+ seen++
+ }
+ continue frame
+ }
+ }
+ tc.t.Errorf("got unexpected frame type %T", fr)
+ }
+ if seen < len(want) {
+ for _, f := range want {
+ if f == nil {
+ continue
+ }
+ tc.t.Errorf("did not see expected frame: %v", reflect.TypeOf(f).In(0))
+ }
+ tc.t.Fatalf("did not see %v expected frame types", len(want)-seen)
+ }
+}
+
+type wantHeader struct {
+ streamID uint32
+ endStream bool
+ header http.Header
+}
+
+// wantHeaders reads a HEADERS frame and potential CONTINUATION frames,
+// and asserts that they contain the expected headers.
+func (tc *testClientConn) wantHeaders(want wantHeader) {
+ tc.t.Helper()
+ got := testClientConnReadFrame[*MetaHeadersFrame](tc)
+ if got, want := got.StreamID, want.streamID; got != want {
+ tc.t.Fatalf("got stream ID %v, want %v", got, want)
+ }
+ if got, want := got.StreamEnded(), want.endStream; got != want {
+ tc.t.Fatalf("got stream ended %v, want %v", got, want)
+ }
+ gotHeader := make(http.Header)
+ for _, f := range got.Fields {
+ gotHeader[f.Name] = append(gotHeader[f.Name], f.Value)
+ }
+ for k, v := range want.header {
+ if !reflect.DeepEqual(v, gotHeader[k]) {
+ tc.t.Fatalf("got header %q = %q; want %q", k, v, gotHeader[k])
+ }
+ }
+}
+
+type wantData struct {
+ streamID uint32
+ endStream bool
+ size int
+}
+
+// wantData reads zero or more DATA frames, and asserts that they match the expectation.
+func (tc *testClientConn) wantData(want wantData) {
+ tc.t.Helper()
+ gotSize := 0
+ gotEndStream := false
+ for tc.hasFrame() && !gotEndStream {
+ data := testClientConnReadFrame[*DataFrame](tc)
+ gotSize += len(data.Data())
+ if data.StreamEnded() {
+ gotEndStream = true
+ }
+ }
+ if gotSize != want.size {
+ tc.t.Fatalf("got %v bytes of DATA frames, want %v", gotSize, want.size)
+ }
+ if gotEndStream != want.endStream {
+ tc.t.Fatalf("after %v bytes of DATA frames, got END_STREAM=%v; want %v", gotSize, gotEndStream, want.endStream)
+ }
+}
+
+// testRequestBody is a Request.Body for use in tests.
+type testRequestBody struct {
+ tc *testClientConn
+
+ // At most one of buf or bytes can be set at any given time:
+ buf bytes.Buffer // specific bytes to read from the body
+ bytes int // body contains this many arbitrary bytes
+
+ err error // read error (comes after any available bytes)
+}
+
+func (tc *testClientConn) newRequestBody() *testRequestBody {
+ b := &testRequestBody{
+ tc: tc,
+ }
+ return b
+}
+
+// Read is called by the ClientConn to read from a request body.
+func (b *testRequestBody) Read(p []byte) (n int, _ error) {
+ b.tc.cc.syncHooks.blockUntil(func() bool {
+ return b.buf.Len() > 0 || b.bytes > 0 || b.err != nil
+ })
+ switch {
+ case b.buf.Len() > 0:
+ return b.buf.Read(p)
+ case b.bytes > 0:
+ if len(p) > b.bytes {
+ p = p[:b.bytes]
+ }
+ b.bytes -= len(p)
+ for i := range p {
+ p[i] = 'A'
+ }
+ return len(p), nil
+ default:
+ return 0, b.err
+ }
+}
+
+// Close is called by the ClientConn when it is done reading from a request body.
+func (b *testRequestBody) Close() error {
+ return nil
+}
+
+// writeBytes adds n arbitrary bytes to the body.
+func (b *testRequestBody) writeBytes(n int) {
+ b.bytes += n
+ b.checkWrite()
+ b.tc.sync()
+}
+
+// Write adds bytes to the body.
+func (b *testRequestBody) Write(p []byte) (int, error) {
+ n, err := b.buf.Write(p)
+ b.checkWrite()
+ b.tc.sync()
+ return n, err
+}
+
+func (b *testRequestBody) checkWrite() {
+ if b.bytes > 0 && b.buf.Len() > 0 {
+ b.tc.t.Fatalf("can't interleave Write and writeBytes on request body")
+ }
+ if b.err != nil {
+ b.tc.t.Fatalf("can't write to request body after closeWithError")
+ }
+}
+
+// closeWithError sets an error which will be returned by Read.
+func (b *testRequestBody) closeWithError(err error) {
+ b.err = err
+ b.tc.sync()
+}
+
+// roundTrip starts a RoundTrip call.
+//
+// (Note that the RoundTrip won't complete until response headers are received,
+// the request times out, or some other terminal condition is reached.)
+func (tc *testClientConn) roundTrip(req *http.Request) *testRoundTrip {
+ rt := &testRoundTrip{
+ t: tc.t,
+ donec: make(chan struct{}),
+ }
+ tc.roundtrips = append(tc.roundtrips, rt)
+ tc.hooks.newstream = func(cs *clientStream) { rt.cs = cs }
+ tc.cc.goRun(func() {
+ defer close(rt.donec)
+ rt.resp, rt.respErr = tc.cc.RoundTrip(req)
+ })
+ tc.sync()
+ tc.hooks.newstream = nil
+
+ tc.t.Cleanup(func() {
+ if !rt.done() {
+ return
+ }
+ res, _ := rt.result()
+ if res != nil {
+ res.Body.Close()
+ }
+ })
+
+ return rt
+}
+
+func (tc *testClientConn) greet(settings ...Setting) {
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.writeSettings(settings...)
+ tc.writeSettingsAck()
+ tc.wantFrameType(FrameSettings) // acknowledgement
+}
+
+func (tc *testClientConn) writeSettings(settings ...Setting) {
+ tc.t.Helper()
+ if err := tc.fr.WriteSettings(settings...); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+func (tc *testClientConn) writeSettingsAck() {
+ tc.t.Helper()
+ if err := tc.fr.WriteSettingsAck(); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+func (tc *testClientConn) writeData(streamID uint32, endStream bool, data []byte) {
+ tc.t.Helper()
+ if err := tc.fr.WriteData(streamID, endStream, data); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+func (tc *testClientConn) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) {
+ tc.t.Helper()
+ if err := tc.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+// makeHeaderBlockFragment encodes headers in a form suitable for inclusion
+// in a HEADERS or CONTINUATION frame.
+//
+// It takes a list of alternating names and values.
+func (tc *testClientConn) makeHeaderBlockFragment(s ...string) []byte {
+ if len(s)%2 != 0 {
+ tc.t.Fatalf("uneven list of header name/value pairs")
+ }
+ tc.encbuf.Reset()
+ for i := 0; i < len(s); i += 2 {
+ tc.enc.WriteField(hpack.HeaderField{Name: s[i], Value: s[i+1]})
+ }
+ return tc.encbuf.Bytes()
+}
+
+func (tc *testClientConn) writeHeaders(p HeadersFrameParam) {
+ tc.t.Helper()
+ if err := tc.fr.WriteHeaders(p); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+// writeHeadersMode writes header frames, as modified by mode:
+//
+// - noHeader: Don't write the header.
+// - oneHeader: Write a single HEADERS frame.
+// - splitHeader: Write a HEADERS frame and CONTINUATION frame.
+func (tc *testClientConn) writeHeadersMode(mode headerType, p HeadersFrameParam) {
+ tc.t.Helper()
+ switch mode {
+ case noHeader:
+ case oneHeader:
+ tc.writeHeaders(p)
+ case splitHeader:
+ if len(p.BlockFragment) < 2 {
+ panic("too small")
+ }
+ contData := p.BlockFragment[1:]
+ contEnd := p.EndHeaders
+ p.BlockFragment = p.BlockFragment[:1]
+ p.EndHeaders = false
+ tc.writeHeaders(p)
+ tc.writeContinuation(p.StreamID, contEnd, contData)
+ default:
+ panic("bogus mode")
+ }
+}
+
+func (tc *testClientConn) writeContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) {
+ tc.t.Helper()
+ if err := tc.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+func (tc *testClientConn) writeRSTStream(streamID uint32, code ErrCode) {
+ tc.t.Helper()
+ if err := tc.fr.WriteRSTStream(streamID, code); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+func (tc *testClientConn) writePing(ack bool, data [8]byte) {
+ tc.t.Helper()
+ if err := tc.fr.WritePing(ack, data); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+func (tc *testClientConn) writeGoAway(maxStreamID uint32, code ErrCode, debugData []byte) {
+ tc.t.Helper()
+ if err := tc.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+func (tc *testClientConn) writeWindowUpdate(streamID, incr uint32) {
+ tc.t.Helper()
+ if err := tc.fr.WriteWindowUpdate(streamID, incr); err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.sync()
+}
+
+// closeWrite causes the net.Conn used by the ClientConn to return an error
+// from Read calls.
+func (tc *testClientConn) closeWrite(err error) {
+ tc.rerr = err
+ tc.sync()
+}
+
+// inflowWindow returns the amount of inbound flow control available for a stream,
+// or for the connection if streamID is 0.
+func (tc *testClientConn) inflowWindow(streamID uint32) int32 {
+ tc.cc.mu.Lock()
+ defer tc.cc.mu.Unlock()
+ if streamID == 0 {
+ return tc.cc.inflow.avail + tc.cc.inflow.unsent
+ }
+ cs := tc.cc.streams[streamID]
+ if cs == nil {
+ tc.t.Errorf("no stream with id %v", streamID)
+ return -1
+ }
+ return cs.inflow.avail + cs.inflow.unsent
+}
+
+// testRoundTrip manages a RoundTrip in progress.
+type testRoundTrip struct {
+ t *testing.T
+ resp *http.Response
+ respErr error
+ donec chan struct{}
+ cs *clientStream
+}
+
+// streamID returns the HTTP/2 stream ID of the request.
+func (rt *testRoundTrip) streamID() uint32 {
+ if rt.cs == nil {
+ panic("stream ID unknown")
+ }
+ return rt.cs.ID
+}
+
+// done reports whether RoundTrip has returned.
+func (rt *testRoundTrip) done() bool {
+ select {
+ case <-rt.donec:
+ return true
+ default:
+ return false
+ }
+}
+
+// result returns the result of the RoundTrip.
+func (rt *testRoundTrip) result() (*http.Response, error) {
+ t := rt.t
+ t.Helper()
+ select {
+ case <-rt.donec:
+ default:
+ t.Fatalf("RoundTrip is not done; want it to be")
+ }
+ return rt.resp, rt.respErr
+}
+
+// response returns the response of a successful RoundTrip.
+// If the RoundTrip unexpectedly failed, it calls t.Fatal.
+func (rt *testRoundTrip) response() *http.Response {
+ t := rt.t
+ t.Helper()
+ resp, err := rt.result()
+ if err != nil {
+ t.Fatalf("RoundTrip returned unexpected error: %v", rt.respErr)
+ }
+ if resp == nil {
+ t.Fatalf("RoundTrip returned nil *Response and nil error")
+ }
+ return resp
+}
+
+// err returns the (possibly nil) error result of RoundTrip.
+func (rt *testRoundTrip) err() error {
+ t := rt.t
+ t.Helper()
+ _, err := rt.result()
+ return err
+}
+
+// wantStatus indicates the expected response StatusCode.
+func (rt *testRoundTrip) wantStatus(want int) {
+ t := rt.t
+ t.Helper()
+ if got := rt.response().StatusCode; got != want {
+ t.Fatalf("got response status %v, want %v", got, want)
+ }
+}
+
+// body reads the contents of the response body.
+func (rt *testRoundTrip) readBody() ([]byte, error) {
+ t := rt.t
+ t.Helper()
+ return io.ReadAll(rt.response().Body)
+}
+
+// wantBody indicates the expected response body.
+// (Note that this consumes the body.)
+func (rt *testRoundTrip) wantBody(want []byte) {
+ t := rt.t
+ t.Helper()
+ got, err := rt.readBody()
+ if err != nil {
+ t.Fatalf("unexpected error reading response body: %v", err)
+ }
+ if !bytes.Equal(got, want) {
+ t.Fatalf("unexpected response body:\ngot: %q\nwant: %q", got, want)
+ }
+}
+
+// wantHeaders indicates the expected response headers.
+func (rt *testRoundTrip) wantHeaders(want http.Header) {
+ t := rt.t
+ t.Helper()
+ res := rt.response()
+ if diff := diffHeaders(res.Header, want); diff != "" {
+ t.Fatalf("unexpected response headers:\n%v", diff)
+ }
+}
+
+// wantTrailers indicates the expected response trailers.
+func (rt *testRoundTrip) wantTrailers(want http.Header) {
+ t := rt.t
+ t.Helper()
+ res := rt.response()
+ if diff := diffHeaders(res.Trailer, want); diff != "" {
+ t.Fatalf("unexpected response trailers:\n%v", diff)
+ }
+}
+
+func diffHeaders(got, want http.Header) string {
+ // nil and 0-length non-nil are equal.
+ if len(got) == 0 && len(want) == 0 {
+ return ""
+ }
+ // We could do a more sophisticated diff here.
+ // DeepEqual is good enough for now.
+ if reflect.DeepEqual(got, want) {
+ return ""
+ }
+ return fmt.Sprintf("got: %v\nwant: %v", got, want)
+}
+
+// testClientConnNetConn implements net.Conn.
+type testClientConnNetConn testClientConn
+
+func (nc *testClientConnNetConn) Read(b []byte) (n int, err error) {
+ nc.cc.syncHooks.blockUntil(func() bool {
+ return nc.rerr != nil || nc.rbuf.Len() > 0
+ })
+ if nc.rbuf.Len() > 0 {
+ return nc.rbuf.Read(b)
+ }
+ return 0, nc.rerr
+}
+
+func (nc *testClientConnNetConn) Write(b []byte) (n int, err error) {
+ return nc.wbuf.Write(b)
+}
+
+func (nc *testClientConnNetConn) Close() error {
+ nc.netConnClosed = true
+ return nil
+}
+
+func (*testClientConnNetConn) LocalAddr() (_ net.Addr) { return }
+func (*testClientConnNetConn) RemoteAddr() (_ net.Addr) { return }
+func (*testClientConnNetConn) SetDeadline(t time.Time) error { return nil }
+func (*testClientConnNetConn) SetReadDeadline(t time.Time) error { return nil }
+func (*testClientConnNetConn) SetWriteDeadline(t time.Time) error { return nil }
+
+// A testTransport allows testing Transport.RoundTrip against fake servers.
+// Tests that aren't specifically exercising RoundTrip's retry loop or connection pooling
+// should use testClientConn instead.
+type testTransport struct {
+ t *testing.T
+ tr *Transport
+
+ ccs []*testClientConn
+}
+
+func newTestTransport(t *testing.T, opts ...func(*Transport)) *testTransport {
+ tr := &Transport{
+ syncHooks: newTestSyncHooks(),
+ }
+ for _, o := range opts {
+ o(tr)
+ }
+
+ tt := &testTransport{
+ t: t,
+ tr: tr,
+ }
+ tr.syncHooks.newclientconn = func(cc *ClientConn) {
+ tt.ccs = append(tt.ccs, newTestClientConnFromClientConn(t, cc))
+ }
+
+ t.Cleanup(func() {
+ tt.sync()
+ if len(tt.ccs) > 0 {
+ t.Fatalf("%v test ClientConns created, but not examined by test", len(tt.ccs))
+ }
+ if tt.tr.syncHooks.total != 0 {
+ t.Errorf("%v goroutines still running after test completed", tt.tr.syncHooks.total)
+ }
+ })
+
+ return tt
+}
+
+func (tt *testTransport) sync() {
+ tt.tr.syncHooks.waitInactive()
+}
+
+func (tt *testTransport) advance(d time.Duration) {
+ tt.tr.syncHooks.advance(d)
+ tt.sync()
+}
+
+func (tt *testTransport) hasConn() bool {
+ return len(tt.ccs) > 0
+}
+
+func (tt *testTransport) getConn() *testClientConn {
+ tt.t.Helper()
+ if len(tt.ccs) == 0 {
+ tt.t.Fatalf("no new ClientConns created; wanted one")
+ }
+ tc := tt.ccs[0]
+ tt.ccs = tt.ccs[1:]
+ tc.sync()
+ tc.readClientPreface()
+ return tc
+}
+
+func (tt *testTransport) roundTrip(req *http.Request) *testRoundTrip {
+ rt := &testRoundTrip{
+ t: tt.t,
+ donec: make(chan struct{}),
+ }
+ tt.tr.syncHooks.goRun(func() {
+ defer close(rt.donec)
+ rt.resp, rt.respErr = tt.tr.RoundTrip(req)
+ })
+ tt.sync()
+
+ tt.t.Cleanup(func() {
+ if !rt.done() {
+ return
+ }
+ res, _ := rt.result()
+ if res != nil {
+ res.Body.Close()
+ }
+ })
+
+ return rt
+}
diff --git a/http2/databuffer.go b/http2/databuffer.go
index a3067f8de..e6f55cbd1 100644
--- a/http2/databuffer.go
+++ b/http2/databuffer.go
@@ -20,41 +20,44 @@ import (
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
-var (
- dataChunkSizeClasses = []int{
- 1 << 10,
- 2 << 10,
- 4 << 10,
- 8 << 10,
- 16 << 10,
- }
- dataChunkPools = [...]sync.Pool{
- {New: func() interface{} { return make([]byte, 1<<10) }},
- {New: func() interface{} { return make([]byte, 2<<10) }},
- {New: func() interface{} { return make([]byte, 4<<10) }},
- {New: func() interface{} { return make([]byte, 8<<10) }},
- {New: func() interface{} { return make([]byte, 16<<10) }},
- }
-)
+var dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return new([1 << 10]byte) }},
+ {New: func() interface{} { return new([2 << 10]byte) }},
+ {New: func() interface{} { return new([4 << 10]byte) }},
+ {New: func() interface{} { return new([8 << 10]byte) }},
+ {New: func() interface{} { return new([16 << 10]byte) }},
+}
func getDataBufferChunk(size int64) []byte {
- i := 0
- for ; i < len(dataChunkSizeClasses)-1; i++ {
- if size <= int64(dataChunkSizeClasses[i]) {
- break
- }
+ switch {
+ case size <= 1<<10:
+ return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
+ case size <= 2<<10:
+ return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
+ case size <= 4<<10:
+ return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
+ case size <= 8<<10:
+ return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
+ default:
+ return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
}
- return dataChunkPools[i].Get().([]byte)
}
func putDataBufferChunk(p []byte) {
- for i, n := range dataChunkSizeClasses {
- if len(p) == n {
- dataChunkPools[i].Put(p)
- return
- }
+ switch len(p) {
+ case 1 << 10:
+ dataChunkPools[0].Put((*[1 << 10]byte)(p))
+ case 2 << 10:
+ dataChunkPools[1].Put((*[2 << 10]byte)(p))
+ case 4 << 10:
+ dataChunkPools[2].Put((*[4 << 10]byte)(p))
+ case 8 << 10:
+ dataChunkPools[3].Put((*[8 << 10]byte)(p))
+ case 16 << 10:
+ dataChunkPools[4].Put((*[16 << 10]byte)(p))
+ default:
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
- panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
diff --git a/http2/frame.go b/http2/frame.go
index c1f6b90dc..43557ab7e 100644
--- a/http2/frame.go
+++ b/http2/frame.go
@@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
}
func (fr *Framer) maxHeaderStringLen() int {
- v := fr.maxHeaderListSize()
- if uint32(int(v)) == v {
- return int(v)
+ v := int(fr.maxHeaderListSize())
+ if v < 0 {
+ // If maxHeaderListSize overflows an int, use no limit (0).
+ return 0
}
- // They had a crazy big number for MaxHeaderBytes anyway,
- // so give them unlimited header lengths:
- return 0
+ return v
}
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
@@ -1565,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
if size > remainSize {
hdec.SetEmitEnabled(false)
mh.Truncated = true
+ remainSize = 0
return
}
remainSize -= size
@@ -1577,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
var hc headersOrContinuation = hf
for {
frag := hc.HeaderBlockFragment()
+
+ // Avoid parsing large amounts of headers that we will then discard.
+ // If the sender exceeds the max header list size by too much,
+ // skip parsing the fragment and close the connection.
+ //
+ // "Too much" is either any CONTINUATION frame after we've already
+ // exceeded the max header list size (in which case remainSize is 0),
+ // or a frame whose encoded size is more than twice the remaining
+ // header list bytes we're willing to accept.
+ if int64(len(frag)) > int64(2*remainSize) {
+ if VerboseLogs {
+ log.Printf("http2: header list too large")
+ }
+ // It would be nice to send a RST_STREAM before sending the GOAWAY,
+ // but the structure of the server's frame writer makes this difficult.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+
+ // Also close the connection after any CONTINUATION frame following an
+ // invalid header, since we stop tracking the size of the headers after
+ // an invalid one.
+ if invalid != nil {
+ if VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ // It would be nice to send a RST_STREAM before sending the GOAWAY,
+ // but the structure of the server's frame writer makes this difficult.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+
if _, err := hdec.Write(frag); err != nil {
return nil, ConnectionError(ErrCodeCompression)
}
diff --git a/http2/go111.go b/http2/go111.go
deleted file mode 100644
index 5bf62b032..000000000
--- a/http2/go111.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.11
-// +build go1.11
-
-package http2
-
-import (
- "net/http/httptrace"
- "net/textproto"
-)
-
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
- return trace != nil && trace.WroteHeaderField != nil
-}
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField(k, []string{v})
- }
-}
-
-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
- if trace != nil {
- return trace.Got1xxResponse
- }
- return nil
-}
diff --git a/http2/go115.go b/http2/go115.go
deleted file mode 100644
index 908af1ab9..000000000
--- a/http2/go115.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.15
-// +build go1.15
-
-package http2
-
-import (
- "context"
- "crypto/tls"
-)
-
-// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
-// connection.
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
- dialer := &tls.Dialer{
- Config: cfg,
- }
- cn, err := dialer.DialContext(ctx, network, addr)
- if err != nil {
- return nil, err
- }
- tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
- return tlsCn, nil
-}
diff --git a/http2/go118.go b/http2/go118.go
deleted file mode 100644
index aca4b2b31..000000000
--- a/http2/go118.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package http2
-
-import (
- "crypto/tls"
- "net"
-)
-
-func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
- return tc.NetConn()
-}
diff --git a/http2/h2i/h2i.go b/http2/h2i/h2i.go
index 901f6ca79..ee7020dd9 100644
--- a/http2/h2i/h2i.go
+++ b/http2/h2i/h2i.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
/*
The h2i command is an interactive HTTP/2 console.
diff --git a/http2/hpack/gen.go b/http2/hpack/gen.go
index de14ab0ec..21a4198b3 100644
--- a/http2/hpack/gen.go
+++ b/http2/hpack/gen.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package main
diff --git a/http2/http2_test.go b/http2/http2_test.go
index f77c08a10..a16774b7f 100644
--- a/http2/http2_test.go
+++ b/http2/http2_test.go
@@ -6,16 +6,13 @@ package http2
import (
"bytes"
- "errors"
"flag"
"fmt"
"io/ioutil"
"net/http"
"os"
- "os/exec"
"path/filepath"
"regexp"
- "strconv"
"strings"
"testing"
"time"
@@ -85,44 +82,6 @@ func encodeHeaderNoImplicit(t *testing.T, headers ...string) []byte {
return buf.Bytes()
}
-// Verify that curl has http2.
-func requireCurl(t *testing.T) {
- out, err := dockerLogs(curl(t, "--version"))
- if err != nil {
- t.Skipf("failed to determine curl features; skipping test")
- }
- if !strings.Contains(string(out), "HTTP2") {
- t.Skip("curl doesn't support HTTP2; skipping test")
- }
-}
-
-func curl(t *testing.T, args ...string) (container string) {
- out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "gohttp2/curl"}, args...)...).Output()
- if err != nil {
- t.Skipf("Failed to run curl in docker: %v, %s", err, out)
- }
- return strings.TrimSpace(string(out))
-}
-
-// Verify that h2load exists.
-func requireH2load(t *testing.T) {
- out, err := dockerLogs(h2load(t, "--version"))
- if err != nil {
- t.Skipf("failed to probe h2load; skipping test: %s", out)
- }
- if !strings.Contains(string(out), "h2load nghttp2/") {
- t.Skipf("h2load not present; skipping test. (Output=%q)", out)
- }
-}
-
-func h2load(t *testing.T, args ...string) (container string) {
- out, err := exec.Command("docker", append([]string{"run", "-d", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl"}, args...)...).Output()
- if err != nil {
- t.Skipf("Failed to run h2load in docker: %v, %s", err, out)
- }
- return strings.TrimSpace(string(out))
-}
-
type puppetCommand struct {
fn func(w http.ResponseWriter, r *http.Request)
done chan<- bool
@@ -151,27 +110,6 @@ func (p *handlerPuppet) do(fn func(http.ResponseWriter, *http.Request)) {
p.ch <- puppetCommand{fn, done}
<-done
}
-func dockerLogs(container string) ([]byte, error) {
- out, err := exec.Command("docker", "wait", container).CombinedOutput()
- if err != nil {
- return out, err
- }
- exitStatus, err := strconv.Atoi(strings.TrimSpace(string(out)))
- if err != nil {
- return out, errors.New("unexpected exit status from docker wait")
- }
- out, err = exec.Command("docker", "logs", container).CombinedOutput()
- exec.Command("docker", "rm", container).Run()
- if err == nil && exitStatus != 0 {
- err = fmt.Errorf("exit status %d: %s", exitStatus, out)
- }
- return out, err
-}
-
-func kill(container string) {
- exec.Command("docker", "kill", container).Run()
- exec.Command("docker", "rm", container).Run()
-}
func cleanDate(res *http.Response) {
if d := res.Header["Date"]; len(d) == 1 {
diff --git a/http2/not_go111.go b/http2/not_go111.go
deleted file mode 100644
index cc0baa819..000000000
--- a/http2/not_go111.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.11
-// +build !go1.11
-
-package http2
-
-import (
- "net/http/httptrace"
- "net/textproto"
-)
-
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
-
-func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
- return nil
-}
diff --git a/http2/not_go115.go b/http2/not_go115.go
deleted file mode 100644
index e6c04cf7a..000000000
--- a/http2/not_go115.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.15
-// +build !go1.15
-
-package http2
-
-import (
- "context"
- "crypto/tls"
-)
-
-// dialTLSWithContext opens a TLS connection.
-func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
- cn, err := tls.Dial(network, addr, cfg)
- if err != nil {
- return nil, err
- }
- if err := cn.Handshake(); err != nil {
- return nil, err
- }
- if cfg.InsecureSkipVerify {
- return cn, nil
- }
- if err := cn.VerifyHostname(cfg.ServerName); err != nil {
- return nil, err
- }
- return cn, nil
-}
diff --git a/http2/not_go118.go b/http2/not_go118.go
deleted file mode 100644
index eab532c96..000000000
--- a/http2/not_go118.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.18
-// +build !go1.18
-
-package http2
-
-import (
- "crypto/tls"
- "net"
-)
-
-func tlsUnderlyingConn(tc *tls.Conn) net.Conn {
- return nil
-}
diff --git a/http2/pipe.go b/http2/pipe.go
index 684d984fd..3b9f06b96 100644
--- a/http2/pipe.go
+++ b/http2/pipe.go
@@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) {
}
}
-var errClosedPipeWrite = errors.New("write on closed buffer")
+var (
+ errClosedPipeWrite = errors.New("write on closed buffer")
+ errUninitializedPipeWrite = errors.New("write on uninitialized buffer")
+)
// Write copies bytes from p into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
@@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
+ // pipe.setBuffer is never invoked, leaving the buffer uninitialized.
+ // We shouldn't try to write to an uninitialized pipe,
+ // but returning an error is better than panicking.
+ if p.b == nil {
+ return 0, errUninitializedPipeWrite
+ }
return p.b.Write(d)
}
diff --git a/http2/server.go b/http2/server.go
index 033b6e6db..ce2e8b40e 100644
--- a/http2/server.go
+++ b/http2/server.go
@@ -124,6 +124,7 @@ type Server struct {
// IdleTimeout specifies how long until idle clients should be
// closed with a GOAWAY frame. PING frames are not considered
// activity for the purposes of IdleTimeout.
+ // If zero or negative, there is no timeout.
IdleTimeout time.Duration
// MaxUploadBufferPerConnection is the size of the initial flow
@@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// passes the connection off to us with the deadline already set.
// Write deadlines are set per stream in serverConn.newStream.
// Disarm the net.Conn write deadline here.
- if sc.hs.WriteTimeout != 0 {
+ if sc.hs.WriteTimeout > 0 {
sc.conn.SetWriteDeadline(time.Time{})
}
@@ -581,9 +582,11 @@ type serverConn struct {
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
+ curHandlers uint32 // number of running handler goroutines
maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
+ unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
@@ -922,7 +925,7 @@ func (sc *serverConn) serve() {
sc.setConnState(http.StateActive)
sc.setConnState(http.StateIdle)
- if sc.srv.IdleTimeout != 0 {
+ if sc.srv.IdleTimeout > 0 {
sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
@@ -981,6 +984,8 @@ func (sc *serverConn) serve() {
return
case gracefulShutdownMsg:
sc.startGracefulShutdownInternal()
+ case handlerDoneMsg:
+ sc.handlerDone()
default:
panic("unknown timer")
}
@@ -1012,14 +1017,6 @@ func (sc *serverConn) serve() {
}
}
-func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
- select {
- case <-sc.doneServing:
- case <-sharedCh:
- close(privateCh)
- }
-}
-
type serverMessage int
// Message values sent to serveMsgCh.
@@ -1028,6 +1025,7 @@ var (
idleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
+ handlerDoneMsg = new(serverMessage)
)
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
@@ -1640,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
delete(sc.streams, st.id)
if len(sc.streams) == 0 {
sc.setConnState(http.StateIdle)
- if sc.srv.IdleTimeout != 0 {
+ if sc.srv.IdleTimeout > 0 {
sc.idleTimer.Reset(sc.srv.IdleTimeout)
}
if h1ServerKeepAlivesDisabled(sc.hs) {
@@ -1900,9 +1898,11 @@ func (st *stream) copyTrailersToHandlerRequest() {
// onReadTimeout is run on its own goroutine (from time.AfterFunc)
// when the stream's ReadTimeout has fired.
func (st *stream) onReadTimeout() {
- // Wrap the ErrDeadlineExceeded to avoid callers depending on us
- // returning the bare error.
- st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+ if st.body != nil {
+ // Wrap the ErrDeadlineExceeded to avoid callers depending on us
+ // returning the bare error.
+ st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded))
+ }
}
// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
@@ -2018,15 +2018,12 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// similar to how the http1 server works. Here it's
// technically more like the http1 Server's ReadHeaderTimeout
// (in Go 1.8), though. That's a more sane option anyway.
- if sc.hs.ReadTimeout != 0 {
+ if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- if st.body != nil {
- st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
- }
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
- go sc.runHandler(rw, req, handler)
- return nil
+ return sc.scheduleHandler(id, rw, req, handler)
}
func (sc *serverConn) upgradeRequest(req *http.Request) {
@@ -2042,10 +2039,14 @@ func (sc *serverConn) upgradeRequest(req *http.Request) {
// Disable any read deadline set by the net/http package
// prior to the upgrade.
- if sc.hs.ReadTimeout != 0 {
+ if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
}
+ // This is the first request on the connection,
+ // so start the handler directly rather than going
+ // through scheduleHandler.
+ sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
}
@@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
- if sc.hs.WriteTimeout != 0 {
+ if sc.hs.WriteTimeout > 0 {
st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
@@ -2286,8 +2287,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response
return &responseWriter{rws: rws}
}
+type unstartedHandler struct {
+ streamID uint32
+ rw *responseWriter
+ req *http.Request
+ handler func(http.ResponseWriter, *http.Request)
+}
+
+// scheduleHandler starts a handler goroutine,
+// or schedules one to start as soon as an existing handler finishes.
+func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error {
+ sc.serveG.check()
+ maxHandlers := sc.advMaxStreams
+ if sc.curHandlers < maxHandlers {
+ sc.curHandlers++
+ go sc.runHandler(rw, req, handler)
+ return nil
+ }
+ if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) {
+ return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm))
+ }
+ sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{
+ streamID: streamID,
+ rw: rw,
+ req: req,
+ handler: handler,
+ })
+ return nil
+}
+
+func (sc *serverConn) handlerDone() {
+ sc.serveG.check()
+ sc.curHandlers--
+ i := 0
+ maxHandlers := sc.advMaxStreams
+ for ; i < len(sc.unstartedHandlers); i++ {
+ u := sc.unstartedHandlers[i]
+ if sc.streams[u.streamID] == nil {
+ // This stream was reset before its goroutine had a chance to start.
+ continue
+ }
+ if sc.curHandlers >= maxHandlers {
+ break
+ }
+ sc.curHandlers++
+ go sc.runHandler(u.rw, u.req, u.handler)
+ sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references
+ }
+ sc.unstartedHandlers = sc.unstartedHandlers[i:]
+ if len(sc.unstartedHandlers) == 0 {
+ sc.unstartedHandlers = nil
+ }
+}
+
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
rw.rws.stream.cancelCtx()
@@ -2495,7 +2550,6 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
- dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
@@ -2615,7 +2669,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date,
})
if err != nil {
- rws.dirty = true
return 0, err
}
if endStream {
@@ -2636,7 +2689,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
- rws.dirty = true
return 0, err
}
}
@@ -2648,9 +2700,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
trailers: rws.trailers,
endStream: true,
})
- if err != nil {
- rws.dirty = true
- }
return len(p), err
}
return len(p), nil
@@ -2866,14 +2915,12 @@ func (rws *responseWriterState) writeHeader(code int) {
h.Del("Transfer-Encoding")
}
- if rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
httpResCode: code,
h: h,
endStream: rws.handlerDone && !rws.hasTrailers(),
- }) != nil {
- rws.dirty = true
- }
+ })
return
}
@@ -2938,19 +2985,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() {
rws := w.rws
- dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
- if !dirty {
- // Only recycle the pool if all prior Write calls to
- // the serverConn goroutine completed successfully. If
- // they returned earlier due to resets from the peer
- // there might still be write goroutines outstanding
- // from the serverConn referencing the rws memory. See
- // issue 20704.
- responseWriterStatePool.Put(rws)
- }
+ responseWriterStatePool.Put(rws)
}
// Push errors.
@@ -3133,6 +3171,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
}
+ sc.curHandlers++
go sc.runHandler(rw, req, sc.handler.ServeHTTP)
return promisedID, nil
}
diff --git a/http2/server_push_test.go b/http2/server_push_test.go
index 6e57de0b7..cda8f4336 100644
--- a/http2/server_push_test.go
+++ b/http2/server_push_test.go
@@ -11,6 +11,7 @@ import (
"io/ioutil"
"net/http"
"reflect"
+ "runtime"
"strconv"
"sync"
"testing"
@@ -483,11 +484,7 @@ func TestServer_Push_RejectAfterGoAway(t *testing.T) {
ready := make(chan struct{})
errc := make(chan error, 2)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
- select {
- case <-ready:
- case <-time.After(5 * time.Second):
- errc <- fmt.Errorf("timeout waiting for GOAWAY to be processed")
- }
+ <-ready
if got, want := w.(http.Pusher).Push("https://"+r.Host+"/pushed", nil), http.ErrNotSupported; got != want {
errc <- fmt.Errorf("Push()=%v, want %v", got, want)
}
@@ -505,6 +502,10 @@ func TestServer_Push_RejectAfterGoAway(t *testing.T) {
case <-ready:
return
default:
+ if runtime.GOARCH == "wasm" {
+ // Work around https://go.dev/issue/65178 to avoid goroutine starvation.
+ runtime.Gosched()
+ }
}
st.sc.serveMsgCh <- func(loopNum int) {
if !st.sc.pushEnabled {
@@ -517,3 +518,55 @@ func TestServer_Push_RejectAfterGoAway(t *testing.T) {
t.Error(err)
}
}
+
+func TestServer_Push_Underflow(t *testing.T) {
+ // Test for #63511: Send several requests which generate PUSH_PROMISE responses,
+ // verify they all complete successfully.
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.RequestURI() {
+ case "/":
+ opt := &http.PushOptions{
+ Header: http.Header{"User-Agent": {"testagent"}},
+ }
+ if err := w.(http.Pusher).Push("/pushed", opt); err != nil {
+ t.Errorf("error pushing: %v", err)
+ }
+ w.WriteHeader(200)
+ case "/pushed":
+ r.Header.Set("User-Agent", "newagent")
+ r.Header.Set("Cookie", "cookie")
+ w.WriteHeader(200)
+ default:
+ t.Errorf("unknown RequestURL %q", r.URL.RequestURI())
+ }
+ })
+ // Send several requests.
+ st.greet()
+ const numRequests = 4
+ for i := 0; i < numRequests; i++ {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: uint32(1 + i*2), // clients send odd numbers
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ }
+ // Each request should result in one PUSH_PROMISE and two responses.
+ numPushPromises := 0
+ numHeaders := 0
+ for numHeaders < numRequests*2 || numPushPromises < numRequests {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatal(err)
+ }
+ switch f := f.(type) {
+ case *HeadersFrame:
+ if !f.Flags.Has(FlagHeadersEndStream) {
+ t.Fatalf("got HEADERS frame with no END_STREAM, expected END_STREAM: %v", f)
+ }
+ numHeaders++
+ case *PushPromiseFrame:
+ numPushPromises++
+ }
+ }
+}
diff --git a/http2/server_test.go b/http2/server_test.go
index cd73291ea..a931a06e5 100644
--- a/http2/server_test.go
+++ b/http2/server_test.go
@@ -20,13 +20,11 @@ import (
"net/http"
"net/http/httptest"
"os"
- "os/exec"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
- "sync/atomic"
"testing"
"time"
@@ -147,6 +145,12 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}
ConfigureServer(ts.Config, h2server)
+ // Go 1.22 changes the default minimum TLS version to TLS 1.2,
+ // in order to properly test cases where we want to reject low
+ // TLS versions, we need to explicitly configure the minimum
+ // version here.
+ ts.Config.TLSConfig.MinVersion = tls.VersionTLS10
+
st := &serverTester{
t: t,
ts: ts,
@@ -2704,96 +2708,6 @@ func readBodyHandler(t *testing.T, want string) func(w http.ResponseWriter, r *h
}
}
-// TestServerWithCurl currently fails, hence the LenientCipherSuites test. See:
-//
-// https://github.com/tatsuhiro-t/nghttp2/issues/140 &
-// http://sourceforge.net/p/curl/bugs/1472/
-func TestServerWithCurl(t *testing.T) { testServerWithCurl(t, false) }
-func TestServerWithCurl_LenientCipherSuites(t *testing.T) { testServerWithCurl(t, true) }
-
-func testServerWithCurl(t *testing.T, permitProhibitedCipherSuites bool) {
- if runtime.GOOS != "linux" {
- t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
- }
- if testing.Short() {
- t.Skip("skipping curl test in short mode")
- }
- requireCurl(t)
- var gotConn int32
- testHookOnConn = func() { atomic.StoreInt32(&gotConn, 1) }
-
- const msg = "Hello from curl!\n"
- ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Foo", "Bar")
- w.Header().Set("Client-Proto", r.Proto)
- io.WriteString(w, msg)
- }))
- ConfigureServer(ts.Config, &Server{
- PermitProhibitedCipherSuites: permitProhibitedCipherSuites,
- })
- ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config
- ts.StartTLS()
- defer ts.Close()
-
- t.Logf("Running test server for curl to hit at: %s", ts.URL)
- container := curl(t, "--silent", "--http2", "--insecure", "-v", ts.URL)
- defer kill(container)
- res, err := dockerLogs(container)
- if err != nil {
- t.Fatal(err)
- }
-
- body := string(res)
- // Search for both "key: value" and "key:value", since curl changed their format
- // Our Dockerfile contains the latest version (no space), but just in case people
- // didn't rebuild, check both.
- if !strings.Contains(body, "foo: Bar") && !strings.Contains(body, "foo:Bar") {
- t.Errorf("didn't see foo: Bar header")
- t.Logf("Got: %s", body)
- }
- if !strings.Contains(body, "client-proto: HTTP/2") && !strings.Contains(body, "client-proto:HTTP/2") {
- t.Errorf("didn't see client-proto: HTTP/2 header")
- t.Logf("Got: %s", res)
- }
- if !strings.Contains(string(res), msg) {
- t.Errorf("didn't see %q content", msg)
- t.Logf("Got: %s", res)
- }
-
- if atomic.LoadInt32(&gotConn) == 0 {
- t.Error("never saw an http2 connection")
- }
-}
-
-var doh2load = flag.Bool("h2load", false, "Run h2load test")
-
-func TestServerWithH2Load(t *testing.T) {
- if !*doh2load {
- t.Skip("Skipping without --h2load flag.")
- }
- if runtime.GOOS != "linux" {
- t.Skip("skipping Docker test when not on Linux; requires --net which won't work with boot2docker anyway")
- }
- requireH2load(t)
-
- msg := strings.Repeat("Hello, h2load!\n", 5000)
- ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, msg)
- w.(http.Flusher).Flush()
- io.WriteString(w, msg)
- }))
- ts.StartTLS()
- defer ts.Close()
-
- cmd := exec.Command("docker", "run", "--net=host", "--entrypoint=/usr/local/bin/h2load", "gohttp2/curl",
- "-n100000", "-c100", "-m100", ts.URL)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- t.Fatal(err)
- }
-}
-
func TestServer_MaxDecoderHeaderTableSize(t *testing.T) {
wantHeaderTableSize := uint32(initialHeaderTableSize * 2)
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, func(s *Server) {
@@ -4664,13 +4578,16 @@ func TestCanonicalHeaderCacheGrowth(t *testing.T) {
sc := &serverConn{
serveG: newGoroutineLock(),
}
- const count = 1000
- for i := 0; i < count; i++ {
- h := fmt.Sprintf("%v-%v", base, i)
+ count := 0
+ added := 0
+ for added < 10*maxCachedCanonicalHeadersKeysSize {
+ h := fmt.Sprintf("%v-%v", base, count)
c := sc.canonicalHeader(h)
if len(h) != len(c) {
t.Errorf("sc.canonicalHeader(%q) = %q, want same length", h, c)
}
+ count++
+ added += len(h)
}
total := 0
for k, v := range sc.canonHeader {
@@ -4756,3 +4673,202 @@ func TestServerWriteDoesNotRetainBufferAfterServerClose(t *testing.T) {
st.ts.Config.Close()
<-donec
}
+
+func TestServerMaxHandlerGoroutines(t *testing.T) {
+ const maxHandlers = 10
+ handlerc := make(chan chan bool)
+ donec := make(chan struct{})
+ defer close(donec)
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ stopc := make(chan bool, 1)
+ select {
+ case handlerc <- stopc:
+ case <-donec:
+ }
+ select {
+ case shouldPanic := <-stopc:
+ if shouldPanic {
+ panic(http.ErrAbortHandler)
+ }
+ case <-donec:
+ }
+ }, func(s *Server) {
+ s.MaxConcurrentStreams = maxHandlers
+ })
+ defer st.Close()
+
+ st.writePreface()
+ st.writeInitialSettings()
+ st.writeSettingsAck()
+
+ // Make maxHandlers concurrent requests.
+ // Reset them all, but only after the handler goroutines have started.
+ var stops []chan bool
+ streamID := uint32(1)
+ for i := 0; i < maxHandlers; i++ {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ stops = append(stops, <-handlerc)
+ st.fr.WriteRSTStream(streamID, ErrCodeCancel)
+ streamID += 2
+ }
+
+ // Start another request, and immediately reset it.
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.fr.WriteRSTStream(streamID, ErrCodeCancel)
+ streamID += 2
+
+ // Start another two requests. Don't reset these.
+ for i := 0; i < 2; i++ {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ streamID += 2
+ }
+
+ // The initial maxHandlers handlers are still executing,
+ // so the last two requests don't start any new handlers.
+ select {
+ case <-handlerc:
+ t.Errorf("handler unexpectedly started while maxHandlers are already running")
+ case <-time.After(1 * time.Millisecond):
+ }
+
+ // Tell two handlers to exit.
+ // The pending requests which weren't reset start handlers.
+ stops[0] <- false // normal exit
+ stops[1] <- true // panic
+ stops = stops[2:]
+ stops = append(stops, <-handlerc)
+ stops = append(stops, <-handlerc)
+
+ // Make a bunch more requests.
+ // Eventually, the server tells us to go away.
+ for i := 0; i < 5*maxHandlers; i++ {
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ EndHeaders: true,
+ })
+ st.fr.WriteRSTStream(streamID, ErrCodeCancel)
+ streamID += 2
+ }
+Frames:
+ for {
+ f, err := st.readFrame()
+ if err != nil {
+ st.t.Fatal(err)
+ }
+ switch f := f.(type) {
+ case *GoAwayFrame:
+ if f.ErrCode != ErrCodeEnhanceYourCalm {
+ t.Errorf("err code = %v; want %v", f.ErrCode, ErrCodeEnhanceYourCalm)
+ }
+ break Frames
+ default:
+ }
+ }
+
+ for _, s := range stops {
+ close(s)
+ }
+}
+
+func TestServerContinuationFlood(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Header)
+ }, func(ts *httptest.Server) {
+ ts.Config.MaxHeaderBytes = 4096
+ })
+ defer st.Close()
+
+ st.writePreface()
+ st.writeInitialSettings()
+ st.writeSettingsAck()
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ })
+ for i := 0; i < 1000; i++ {
+ st.fr.WriteContinuation(1, false, st.encodeHeaderRaw(
+ fmt.Sprintf("x-%v", i), "1234567890",
+ ))
+ }
+ st.fr.WriteContinuation(1, true, st.encodeHeaderRaw(
+ "x-last-header", "1",
+ ))
+
+ for {
+ f, err := st.readFrame()
+ if err != nil {
+ break
+ }
+ switch f.(type) {
+ case *HeadersFrame:
+ t.Fatalf("received HEADERS frame; want GOAWAY and a closed connection")
+ }
+ }
+ // We expect to have seen a GOAWAY before the connection closes,
+ // but the server will close the connection after one second
+ // whether or not it has finished sending the GOAWAY. On windows-amd64-race
+ // builders, this fairly consistently results in the connection closing without
+ // the GOAWAY being sent.
+ //
+ // Since the server's behavior is inherently racy here and the important thing
+ // is that the connection is closed, don't check for the GOAWAY having been sent.
+}
+
+func TestServerContinuationAfterInvalidHeader(t *testing.T) {
+ st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Header)
+ })
+ defer st.Close()
+
+ st.writePreface()
+ st.writeInitialSettings()
+ st.writeSettingsAck()
+
+ st.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ BlockFragment: st.encodeHeader(),
+ EndStream: true,
+ })
+ st.fr.WriteContinuation(1, false, st.encodeHeaderRaw(
+ "x-invalid-header", "\x00",
+ ))
+ st.fr.WriteContinuation(1, true, st.encodeHeaderRaw(
+ "x-valid-header", "1",
+ ))
+
+ var sawGoAway bool
+ for {
+ f, err := st.readFrame()
+ if err != nil {
+ break
+ }
+ switch f.(type) {
+ case *GoAwayFrame:
+ sawGoAway = true
+ case *HeadersFrame:
+ t.Fatalf("received HEADERS frame; want GOAWAY")
+ }
+ }
+ if !sawGoAway {
+ t.Errorf("connection closed with no GOAWAY frame; want one")
+ }
+}
diff --git a/http2/testsync.go b/http2/testsync.go
new file mode 100644
index 000000000..61075bd16
--- /dev/null
+++ b/http2/testsync.go
@@ -0,0 +1,331 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package http2
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// testSyncHooks coordinates goroutines in tests.
+//
+// For example, a call to ClientConn.RoundTrip involves several goroutines, including:
+// - the goroutine running RoundTrip;
+// - the clientStream.doRequest goroutine, which writes the request; and
+// - the clientStream.readLoop goroutine, which reads the response.
+//
+// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines
+// are blocked waiting for some condition such as reading the Request.Body or waiting for
+// flow control to become available.
+//
+// The testSyncHooks also manage timers and synthetic time in tests.
+// This permits us to, for example, start a request and cause it to time out waiting for
+// response headers without resorting to time.Sleep calls.
+type testSyncHooks struct {
+ // active/inactive act as a mutex and condition variable.
+ //
+ // - neither chan contains a value: testSyncHooks is locked.
+ // - active contains a value: unlocked, and at least one goroutine is not blocked
+ // - inactive contains a value: unlocked, and all goroutines are blocked
+ active chan struct{}
+ inactive chan struct{}
+
+ // goroutine counts
+ total int // total goroutines
+ condwait map[*sync.Cond]int // blocked in sync.Cond.Wait
+ blocked []*testBlockedGoroutine // otherwise blocked
+
+ // fake time
+ now time.Time
+ timers []*fakeTimer
+
+ // Transport testing: Report various events.
+ newclientconn func(*ClientConn)
+ newstream func(*clientStream)
+}
+
+// testBlockedGoroutine is a blocked goroutine.
+type testBlockedGoroutine struct {
+ f func() bool // blocked until f returns true
+ ch chan struct{} // closed when unblocked
+}
+
+func newTestSyncHooks() *testSyncHooks {
+ h := &testSyncHooks{
+ active: make(chan struct{}, 1),
+ inactive: make(chan struct{}, 1),
+ condwait: map[*sync.Cond]int{},
+ }
+ h.inactive <- struct{}{}
+ h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)
+ return h
+}
+
+// lock acquires the testSyncHooks mutex.
+func (h *testSyncHooks) lock() {
+ select {
+ case <-h.active:
+ case <-h.inactive:
+ }
+}
+
+// waitInactive waits for all goroutines to become inactive.
+func (h *testSyncHooks) waitInactive() {
+ for {
+ <-h.inactive
+ if !h.unlock() {
+ break
+ }
+ }
+}
+
+// unlock releases the testSyncHooks mutex.
+// It reports whether any goroutines are active.
+func (h *testSyncHooks) unlock() (active bool) {
+ // Look for a blocked goroutine which can be unblocked.
+ blocked := h.blocked[:0]
+ unblocked := false
+ for _, b := range h.blocked {
+ if !unblocked && b.f() {
+ unblocked = true
+ close(b.ch)
+ } else {
+ blocked = append(blocked, b)
+ }
+ }
+ h.blocked = blocked
+
+ // Count goroutines blocked on condition variables.
+ condwait := 0
+ for _, count := range h.condwait {
+ condwait += count
+ }
+
+ if h.total > condwait+len(blocked) {
+ h.active <- struct{}{}
+ return true
+ } else {
+ h.inactive <- struct{}{}
+ return false
+ }
+}
+
+// goRun starts a new goroutine.
+func (h *testSyncHooks) goRun(f func()) {
+ h.lock()
+ h.total++
+ h.unlock()
+ go func() {
+ defer func() {
+ h.lock()
+ h.total--
+ h.unlock()
+ }()
+ f()
+ }()
+}
+
+// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
+// It waits until f returns true before proceeding.
+//
+// Example usage:
+//
+// h.blockUntil(func() bool {
+// // Is the context done yet?
+// select {
+// case <-ctx.Done():
+// default:
+// return false
+// }
+// return true
+// })
+// // Wait for the context to become done.
+// <-ctx.Done()
+//
+// The function f passed to blockUntil must be non-blocking and idempotent.
+func (h *testSyncHooks) blockUntil(f func() bool) {
+ if f() {
+ return
+ }
+ ch := make(chan struct{})
+ h.lock()
+ h.blocked = append(h.blocked, &testBlockedGoroutine{
+ f: f,
+ ch: ch,
+ })
+ h.unlock()
+ <-ch
+}
+
+// condBroadcast is sync.Cond.Broadcast.
+func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
+ h.lock()
+ delete(h.condwait, cond)
+ h.unlock()
+ cond.Broadcast()
+}
+
+// condWait is sync.Cond.Wait.
+func (h *testSyncHooks) condWait(cond *sync.Cond) {
+ h.lock()
+ h.condwait[cond]++
+ h.unlock()
+}
+
+// newTimer creates a new fake timer.
+func (h *testSyncHooks) newTimer(d time.Duration) timer {
+ h.lock()
+ defer h.unlock()
+ t := &fakeTimer{
+ hooks: h,
+ when: h.now.Add(d),
+ c: make(chan time.Time),
+ }
+ h.timers = append(h.timers, t)
+ return t
+}
+
+// afterFunc creates a new fake AfterFunc timer.
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
+ h.lock()
+ defer h.unlock()
+ t := &fakeTimer{
+ hooks: h,
+ when: h.now.Add(d),
+ f: f,
+ }
+ h.timers = append(h.timers, t)
+ return t
+}
+
+func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ ctx, cancel := context.WithCancel(ctx)
+ t := h.afterFunc(d, cancel)
+ return ctx, func() {
+ t.Stop()
+ cancel()
+ }
+}
+
+func (h *testSyncHooks) timeUntilEvent() time.Duration {
+ h.lock()
+ defer h.unlock()
+ var next time.Time
+ for _, t := range h.timers {
+ if next.IsZero() || t.when.Before(next) {
+ next = t.when
+ }
+ }
+ if d := next.Sub(h.now); d > 0 {
+ return d
+ }
+ return 0
+}
+
+// advance advances time and causes synthetic timers to fire.
+func (h *testSyncHooks) advance(d time.Duration) {
+ h.lock()
+ defer h.unlock()
+ h.now = h.now.Add(d)
+ timers := h.timers[:0]
+ for _, t := range h.timers {
+ t := t // remove after go.mod depends on go1.22
+ t.mu.Lock()
+ switch {
+ case t.when.After(h.now):
+ timers = append(timers, t)
+ case t.when.IsZero():
+ // stopped timer
+ default:
+ t.when = time.Time{}
+ if t.c != nil {
+ close(t.c)
+ }
+ if t.f != nil {
+ h.total++
+ go func() {
+ defer func() {
+ h.lock()
+ h.total--
+ h.unlock()
+ }()
+ t.f()
+ }()
+ }
+ }
+ t.mu.Unlock()
+ }
+ h.timers = timers
+}
+
+// A timer wraps a time.Timer, or a synthetic equivalent in tests.
+// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
+type timer interface {
+ C() <-chan time.Time
+ Stop() bool
+ Reset(d time.Duration) bool
+}
+
+// timeTimer implements timer using real time.
+type timeTimer struct {
+ t *time.Timer
+ c chan time.Time
+}
+
+// newTimeTimer creates a new timer using real time.
+func newTimeTimer(d time.Duration) timer {
+ ch := make(chan time.Time)
+ t := time.AfterFunc(d, func() {
+ close(ch)
+ })
+ return &timeTimer{t, ch}
+}
+
+// newTimeAfterFunc creates an AfterFunc timer using real time.
+func newTimeAfterFunc(d time.Duration, f func()) timer {
+ return &timeTimer{
+ t: time.AfterFunc(d, f),
+ }
+}
+
+func (t timeTimer) C() <-chan time.Time { return t.c }
+func (t timeTimer) Stop() bool { return t.t.Stop() }
+func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
+
+// fakeTimer implements timer using fake time.
+type fakeTimer struct {
+ hooks *testSyncHooks
+
+ mu sync.Mutex
+ when time.Time // when the timer will fire
+ c chan time.Time // closed when the timer fires; mutually exclusive with f
+ f func() // called when the timer fires; mutually exclusive with c
+}
+
+func (t *fakeTimer) C() <-chan time.Time { return t.c }
+
+func (t *fakeTimer) Stop() bool {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ stopped := t.when.IsZero()
+ t.when = time.Time{}
+ return stopped
+}
+
+func (t *fakeTimer) Reset(d time.Duration) bool {
+ if t.c != nil || t.f == nil {
+ panic("fakeTimer only supports Reset on AfterFunc timers")
+ }
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.hooks.lock()
+ defer t.hooks.unlock()
+ active := !t.when.IsZero()
+ t.when = t.hooks.now.Add(d)
+ if !active {
+ t.hooks.timers = append(t.hooks.timers, t)
+ }
+ return active
+}
diff --git a/http2/transport.go b/http2/transport.go
index b9632380e..ce375c8c7 100644
--- a/http2/transport.go
+++ b/http2/transport.go
@@ -19,6 +19,7 @@ import (
"io/fs"
"log"
"math"
+ "math/bits"
mathrand "math/rand"
"net"
"net/http"
@@ -146,6 +147,12 @@ type Transport struct {
// waiting for their turn.
StrictMaxConcurrentStreams bool
+ // IdleConnTimeout is the maximum amount of time an idle
+ // (keep-alive) connection will remain idle before closing
+ // itself.
+ // Zero means no limit.
+ IdleConnTimeout time.Duration
+
// ReadIdleTimeout is the timeout after which a health check using ping
// frame will be carried out if no frame is received on the connection.
// Note that a ping response will is considered a received frame, so if
@@ -177,6 +184,8 @@ type Transport struct {
connPoolOnce sync.Once
connPoolOrDef ClientConnPool // non-nil version of ConnPool
+
+ syncHooks *testSyncHooks
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -290,8 +299,7 @@ func (t *Transport) initConnPool() {
// HTTP/2 server.
type ClientConn struct {
t *Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
- tconnClosed bool
+ tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
@@ -302,7 +310,7 @@ type ClientConn struct {
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
- idleTimer *time.Timer
+ idleTimer timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -344,6 +352,60 @@ type ClientConn struct {
werr error // first write error that has occurred
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
+
+ syncHooks *testSyncHooks // can be nil
+}
+
+// Hook points used for testing.
+// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
+// Inside tests, see the testSyncHooks function docs.
+
+// goRun starts a new goroutine.
+func (cc *ClientConn) goRun(f func()) {
+ if cc.syncHooks != nil {
+ cc.syncHooks.goRun(f)
+ return
+ }
+ go f()
+}
+
+// condBroadcast is cc.cond.Broadcast.
+func (cc *ClientConn) condBroadcast() {
+ if cc.syncHooks != nil {
+ cc.syncHooks.condBroadcast(cc.cond)
+ }
+ cc.cond.Broadcast()
+}
+
+// condWait is cc.cond.Wait.
+func (cc *ClientConn) condWait() {
+ if cc.syncHooks != nil {
+ cc.syncHooks.condWait(cc.cond)
+ }
+ cc.cond.Wait()
+}
+
+// newTimer creates a new time.Timer, or a synthetic timer in tests.
+func (cc *ClientConn) newTimer(d time.Duration) timer {
+ if cc.syncHooks != nil {
+ return cc.syncHooks.newTimer(d)
+ }
+ return newTimeTimer(d)
+}
+
+// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
+func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer {
+ if cc.syncHooks != nil {
+ return cc.syncHooks.afterFunc(d, f)
+ }
+ return newTimeAfterFunc(d, f)
+}
+
+func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+ if cc.syncHooks != nil {
+ return cc.syncHooks.contextWithTimeout(ctx, d)
+ }
+ return context.WithTimeout(ctx, d)
}
// clientStream is the state for a single HTTP/2 stream. One of these
@@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) {
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
if cs.cc.cond != nil {
// Wake up writeRequestBody if it is waiting on flow control.
- cs.cc.cond.Broadcast()
+ cs.cc.condBroadcast()
}
}
@@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() {
defer cc.mu.Unlock()
if cs.reqBody != nil && cs.reqBodyClosed == nil {
cs.closeReqBodyLocked()
- cc.cond.Broadcast()
+ cc.condBroadcast()
}
}
@@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() {
}
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
- go func() {
+ cs.cc.goRun(func() {
cs.reqBody.Close()
close(reqBodyClosed)
- }()
+ })
}
type stickyErrWriter struct {
@@ -518,11 +580,14 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
func authorityAddr(scheme string, authority string) (addr string) {
host, port, err := net.SplitHostPort(authority)
if err != nil { // authority didn't have a port
+ host = authority
+ port = ""
+ }
+ if port == "" { // authority's port was empty
port = "443"
if scheme == "http" {
port = "80"
}
- host = authority
}
if a, err := idna.ToASCII(host); err == nil {
host = a
@@ -534,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) {
return net.JoinHostPort(host, port)
}
-var retryBackoffHook func(time.Duration) *time.Timer
-
-func backoffNewTimer(d time.Duration) *time.Timer {
- if retryBackoffHook != nil {
- return retryBackoffHook(d)
- }
- return time.NewTimer(d)
-}
-
// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
@@ -570,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- timer := backoffNewTimer(d)
+ var tm timer
+ if t.syncHooks != nil {
+ tm = t.syncHooks.newTimer(d)
+ t.syncHooks.blockUntil(func() bool {
+ select {
+ case <-tm.C():
+ case <-req.Context().Done():
+ default:
+ return false
+ }
+ return true
+ })
+ } else {
+ tm = newTimeTimer(d)
+ }
select {
- case <-timer.C:
+ case <-tm.C():
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
- timer.Stop()
+ tm.Stop()
err = req.Context().Err()
}
}
@@ -655,6 +725,9 @@ func canRetryError(err error) bool {
}
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
+ if t.syncHooks != nil {
+ return t.newClientConn(nil, singleUse, t.syncHooks)
+ }
host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
@@ -663,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
- return t.newClientConn(tconn, singleUse)
+ return t.newClientConn(tconn, singleUse, nil)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -729,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, t.disableKeepAlives())
+ return t.newClientConn(c, t.disableKeepAlives(), nil)
}
-func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) {
cc := &ClientConn{
t: t,
tconn: c,
@@ -747,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
+ syncHooks: hooks,
+ }
+ if hooks != nil {
+ hooks.newclientconn(cc)
+ c = cc.tconn
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
- cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
+ cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout)
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -815,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
return nil, cc.werr
}
- go cc.readLoop()
+ cc.goRun(cc.readLoop)
return cc, nil
}
@@ -823,7 +901,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.t.pingTimeout()
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1015,7 +1093,7 @@ func (cc *ClientConn) forceCloseConn() {
if !ok {
return
}
- if nc := tlsUnderlyingConn(tc); nc != nil {
+ if nc := tc.NetConn(); nc != nil {
nc.Close()
}
}
@@ -1053,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
// Wait for all in-flight streams to complete or connection to close
done := make(chan struct{})
cancelled := false // guarded by cc.mu
- go func() {
+ cc.goRun(func() {
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1065,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
if cancelled {
break
}
- cc.cond.Wait()
+ cc.condWait()
}
- }()
+ })
shutdownEnterWaitStateHook()
select {
case <-done:
@@ -1077,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
cc.mu.Lock()
// Free the goroutine above
cancelled = true
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.mu.Unlock()
return ctx.Err()
}
@@ -1115,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) {
for _, cs := range cc.streams {
cs.abortStreamLocked(err)
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.mu.Unlock()
cc.closeConn()
}
@@ -1212,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() {
}
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ return cc.roundTrip(req, nil)
+}
+
+func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) {
ctx := req.Context()
cs := &clientStream{
cc: cc,
@@ -1226,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
respHeaderRecv: make(chan struct{}),
donec: make(chan struct{}),
}
- go cs.doRequest(req)
+ cc.goRun(func() {
+ cs.doRequest(req)
+ })
waitDone := func() error {
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-cs.donec:
+ case <-ctx.Done():
+ case <-cs.reqCancel:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-cs.donec:
return nil
@@ -1289,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return err
}
+ if streamf != nil {
+ streamf(cs)
+ }
+
for {
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-cs.respHeaderRecv:
+ case <-cs.abort:
+ case <-ctx.Done():
+ case <-cs.reqCancel:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-cs.respHeaderRecv:
return handleResponseHeaders()
@@ -1345,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
if cc.reqHeaderMu == nil {
panic("RoundTrip on uninitialized ClientConn") // for tests
}
+ var newStreamHook func(*clientStream)
+ if cc.syncHooks != nil {
+ newStreamHook = cc.syncHooks.newstream
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case cc.reqHeaderMu <- struct{}{}:
+ <-cc.reqHeaderMu
+ case <-cs.reqCancel:
+ case <-ctx.Done():
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case cc.reqHeaderMu <- struct{}{}:
case <-cs.reqCancel:
@@ -1369,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
}
cc.mu.Unlock()
+ if newStreamHook != nil {
+ newStreamHook(cs)
+ }
+
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
@@ -1449,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) {
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := time.NewTimer(d)
+ timer := cc.newTimer(d)
defer timer.Stop()
- respHeaderTimer = timer.C
+ respHeaderTimer = timer.C()
respHeaderRecv = cs.respHeaderRecv
}
// Wait until the peer half-closes its end of the stream,
// or until the request is aborted (via context, error, or otherwise),
// whichever comes first.
for {
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-cs.peerClosed:
+ case <-respHeaderTimer:
+ case <-respHeaderRecv:
+ case <-cs.abort:
+ case <-ctx.Done():
+ case <-cs.reqCancel:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-cs.peerClosed:
return nil
@@ -1606,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
return nil
}
cc.pendingRequests++
- cc.cond.Wait()
+ cc.condWait()
cc.pendingRequests--
select {
case <-cs.abort:
@@ -1677,7 +1824,27 @@ func (cs *clientStream) frameScratchBufferLen(maxFrameSize int) int {
return int(n) // doesn't truncate; max is 512K
}
-var bufPool sync.Pool // of *[]byte
+// Seven bufPools manage different frame sizes. This helps to avoid scenarios where long-running
+// streaming requests using small frame sizes occupy large buffers initially allocated for prior
+// requests needing big buffers. The size ranges are as follows:
+// {0 KB, 16 KB], {16 KB, 32 KB], {32 KB, 64 KB], {64 KB, 128 KB], {128 KB, 256 KB],
+// {256 KB, 512 KB], {512 KB, infinity}
+// In practice, the maximum scratch buffer size should not exceed 512 KB due to
+// frameScratchBufferLen(maxFrameSize), thus the "infinity pool" should never be used.
+// It exists mainly as a safety measure, for potential future increases in max buffer size.
+var bufPools [7]sync.Pool // of *[]byte
+func bufPoolIndex(size int) int {
+ if size <= 16384 {
+ return 0
+ }
+ size -= 1
+ bits := bits.Len(uint(size))
+ index := bits - 14
+ if index >= len(bufPools) {
+ return len(bufPools) - 1
+ }
+ return index
+}
func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
cc := cs.cc
@@ -1695,12 +1862,13 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
// Scratch buffer for reading into & writing from.
scratchLen := cs.frameScratchBufferLen(maxFrameSize)
var buf []byte
- if bp, ok := bufPool.Get().(*[]byte); ok && len(*bp) >= scratchLen {
- defer bufPool.Put(bp)
+ index := bufPoolIndex(scratchLen)
+ if bp, ok := bufPools[index].Get().(*[]byte); ok && len(*bp) >= scratchLen {
+ defer bufPools[index].Put(bp)
buf = *bp
} else {
buf = make([]byte, scratchLen)
- defer bufPool.Put(&buf)
+ defer bufPools[index].Put(&buf)
}
var sawEOF bool
@@ -1847,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
cs.flow.take(take)
return take, nil
}
- cc.cond.Wait()
+ cc.condWait()
+ }
+}
+
+func validateHeaders(hdrs http.Header) string {
+ for k, vv := range hdrs {
+ if !httpguts.ValidHeaderFieldName(k) {
+ return fmt.Sprintf("name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // Don't include the value in the error,
+ // because it may be sensitive.
+ return fmt.Sprintf("value for header %q", k)
+ }
+ }
}
+ return ""
}
var errNilRequestURL = errors.New("http2: Request.URI is nil")
@@ -1888,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
}
}
- // Check for any invalid headers and return an error before we
+ // Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
- for k, vv := range req.Header {
- if !httpguts.ValidHeaderFieldName(k) {
- return nil, fmt.Errorf("invalid HTTP header name %q", k)
- }
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- // Don't include the value in the error, because it may be sensitive.
- return nil, fmt.Errorf("invalid HTTP header value for header %q", k)
- }
- }
+ if err := validateHeaders(req.Header); err != "" {
+ return nil, fmt.Errorf("invalid HTTP header %s", err)
+ }
+ if err := validateHeaders(req.Trailer); err != "" {
+ return nil, fmt.Errorf("invalid HTTP trailer %s", err)
}
enumerateHeaders := func(f func(name, value string)) {
@@ -2119,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
- cc.cond.Broadcast()
+ cc.condBroadcast()
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
@@ -2207,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() {
cs.abortStreamLocked(err)
}
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.mu.Unlock()
}
@@ -2242,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.t.ReadIdleTimeout
- var t *time.Timer
+ var t timer
if readIdleTimeout != 0 {
- t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
- defer t.Stop()
+ t = cc.afterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2660,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
})
return nil
}
- if !cs.firstByte {
+ if !cs.pastHeaders {
cc.logf("protocol error: received DATA before a HEADERS frame")
rl.endStreamError(cs, StreamError{
StreamID: f.StreamID,
@@ -2843,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
for _, cs := range cc.streams {
cs.flow.add(delta)
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
cc.initialWindowSize = s.Val
case SettingHeaderTableSize:
@@ -2887,9 +3065,18 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
fl = &cs.flow
}
if !fl.add(int32(f.Increment)) {
+ // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR
+ if cs != nil {
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeFlowControl,
+ })
+ return nil
+ }
+
return ConnectionError(ErrCodeFlowControl)
}
- cc.cond.Broadcast()
+ cc.condBroadcast()
return nil
}
@@ -2931,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
}
cc.mu.Unlock()
}
- errc := make(chan error, 1)
- go func() {
+ var pingError error
+ errc := make(chan struct{})
+ cc.goRun(func() {
cc.wmu.Lock()
defer cc.wmu.Unlock()
- if err := cc.fr.WritePing(false, p); err != nil {
- errc <- err
+ if pingError = cc.fr.WritePing(false, p); pingError != nil {
+ close(errc)
return
}
- if err := cc.bw.Flush(); err != nil {
- errc <- err
+ if pingError = cc.bw.Flush(); pingError != nil {
+ close(errc)
return
}
- }()
+ })
+ if cc.syncHooks != nil {
+ cc.syncHooks.blockUntil(func() bool {
+ select {
+ case <-c:
+ case <-errc:
+ case <-ctx.Done():
+ case <-cc.readerDone:
+ default:
+ return false
+ }
+ return true
+ })
+ }
select {
case <-c:
return nil
- case err := <-errc:
- return err
+ case <-errc:
+ return pingError
case <-ctx.Done():
return ctx.Err()
case <-cc.readerDone:
@@ -3117,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err
}
func (t *Transport) idleConnTimeout() time.Duration {
+ // to keep things backwards compatible, we use non-zero values of
+ // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying
+ // http1 transport, followed by 0
+ if t.IdleConnTimeout != 0 {
+ return t.IdleConnTimeout
+ }
+
if t.t1 != nil {
return t.t1.IdleConnTimeout
}
+
return 0
}
@@ -3177,3 +3386,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
trace.GotFirstResponseByte()
}
}
+
+func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+ return trace != nil && trace.WroteHeaderField != nil
+}
+
+func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(k, []string{v})
+ }
+}
+
+func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+ if trace != nil {
+ return trace.Got1xxResponse
+ }
+ return nil
+}
+
+// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS
+// connection.
+func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) {
+ dialer := &tls.Dialer{
+ Config: cfg,
+ }
+ cn, err := dialer.DialContext(ctx, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed
+ return tlsCn, nil
+}
diff --git a/http2/transport_go117_test.go b/http2/transport_go117_test.go
deleted file mode 100644
index f5d4e0c1a..000000000
--- a/http2/transport_go117_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.17
-// +build go1.17
-
-package http2
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "net/http"
- "net/http/httptest"
-
- "testing"
-)
-
-func TestTransportDialTLSContext(t *testing.T) {
- blockCh := make(chan struct{})
- serverTLSConfigFunc := func(ts *httptest.Server) {
- ts.Config.TLSConfig = &tls.Config{
- // Triggers the server to request the clients certificate
- // during TLS handshake.
- ClientAuth: tls.RequestClientCert,
- }
- }
- ts := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {},
- optOnlyServer,
- serverTLSConfigFunc,
- )
- defer ts.Close()
- tr := &Transport{
- TLSClientConfig: &tls.Config{
- GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- // Tests that the context provided to `req` is
- // passed into this function.
- close(blockCh)
- <-cri.Context().Done()
- return nil, cri.Context().Err()
- },
- InsecureSkipVerify: true,
- },
- }
- defer tr.CloseIdleConnections()
- req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- req = req.WithContext(ctx)
- errCh := make(chan error)
- go func() {
- defer close(errCh)
- res, err := tr.RoundTrip(req)
- if err != nil {
- errCh <- err
- return
- }
- res.Body.Close()
- }()
- // Wait for GetClientCertificate handler to be called
- <-blockCh
- // Cancel the context
- cancel()
- // Expect the cancellation error here
- err = <-errCh
- if err == nil {
- t.Fatal("cancelling context during client certificate fetch did not error as expected")
- return
- }
- if !errors.Is(err, context.Canceled) {
- t.Fatalf("unexpected error returned after cancellation: %v", err)
- }
-}
-
-// TestDialRaceResumesDial tests that, given two concurrent requests
-// to the same address, when the first Dial is interrupted because
-// the first request's context is cancelled, the second request
-// resumes the dial automatically.
-func TestDialRaceResumesDial(t *testing.T) {
- blockCh := make(chan struct{})
- serverTLSConfigFunc := func(ts *httptest.Server) {
- ts.Config.TLSConfig = &tls.Config{
- // Triggers the server to request the clients certificate
- // during TLS handshake.
- ClientAuth: tls.RequestClientCert,
- }
- }
- ts := newServerTester(t,
- func(w http.ResponseWriter, r *http.Request) {},
- optOnlyServer,
- serverTLSConfigFunc,
- )
- defer ts.Close()
- tr := &Transport{
- TLSClientConfig: &tls.Config{
- GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- select {
- case <-blockCh:
- // If we already errored, return without error.
- return &tls.Certificate{}, nil
- default:
- }
- close(blockCh)
- <-cri.Context().Done()
- return nil, cri.Context().Err()
- },
- InsecureSkipVerify: true,
- },
- }
- defer tr.CloseIdleConnections()
- req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- // Create two requests with independent cancellation.
- ctx1, cancel1 := context.WithCancel(context.Background())
- defer cancel1()
- req1 := req.WithContext(ctx1)
- ctx2, cancel2 := context.WithCancel(context.Background())
- defer cancel2()
- req2 := req.WithContext(ctx2)
- errCh := make(chan error)
- go func() {
- res, err := tr.RoundTrip(req1)
- if err != nil {
- errCh <- err
- return
- }
- res.Body.Close()
- }()
- successCh := make(chan struct{})
- go func() {
- // Don't start request until first request
- // has initiated the handshake.
- <-blockCh
- res, err := tr.RoundTrip(req2)
- if err != nil {
- errCh <- err
- return
- }
- res.Body.Close()
- // Close successCh to indicate that the second request
- // made it to the server successfully.
- close(successCh)
- }()
- // Wait for GetClientCertificate handler to be called
- <-blockCh
- // Cancel the context first
- cancel1()
- // Expect the cancellation error here
- err = <-errCh
- if err == nil {
- t.Fatal("cancelling context during client certificate fetch did not error as expected")
- return
- }
- if !errors.Is(err, context.Canceled) {
- t.Fatalf("unexpected error returned after cancellation: %v", err)
- }
- select {
- case err := <-errCh:
- t.Fatalf("unexpected second error: %v", err)
- case <-successCh:
- }
-}
diff --git a/http2/transport_test.go b/http2/transport_test.go
index d3156208c..11ff67b4c 100644
--- a/http2/transport_test.go
+++ b/http2/transport_test.go
@@ -95,6 +95,88 @@ func startH2cServer(t *testing.T) net.Listener {
return l
}
+func TestIdleConnTimeout(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ idleConnTimeout time.Duration
+ wait time.Duration
+ baseTransport *http.Transport
+ wantNewConn bool
+ }{{
+ name: "NoExpiry",
+ idleConnTimeout: 2 * time.Second,
+ wait: 1 * time.Second,
+ baseTransport: nil,
+ wantNewConn: false,
+ }, {
+ name: "H2TransportTimeoutExpires",
+ idleConnTimeout: 1 * time.Second,
+ wait: 2 * time.Second,
+ baseTransport: nil,
+ wantNewConn: true,
+ }, {
+ name: "H1TransportTimeoutExpires",
+ idleConnTimeout: 0 * time.Second,
+ wait: 1 * time.Second,
+ baseTransport: &http.Transport{
+ IdleConnTimeout: 2 * time.Second,
+ },
+ wantNewConn: false,
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ tt := newTestTransport(t, func(tr *Transport) {
+ tr.IdleConnTimeout = test.idleConnTimeout
+ })
+ var tc *testClientConn
+ for i := 0; i < 3; i++ {
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tt.roundTrip(req)
+
+ // This request happens on a new conn if it's the first request
+ // (and there is no cached conn), or if the test timeout is long
+ // enough that old conns are being closed.
+ wantConn := i == 0 || test.wantNewConn
+ if has := tt.hasConn(); has != wantConn {
+ t.Fatalf("request %v: hasConn=%v, want %v", i, has, wantConn)
+ }
+ if wantConn {
+ tc = tt.getConn()
+ // Read client's SETTINGS and first WINDOW_UPDATE,
+ // send our SETTINGS.
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.writeSettings()
+ }
+ if tt.hasConn() {
+ t.Fatalf("request %v: Transport has more than one conn", i)
+ }
+
+ // Respond to the client's request.
+ hf := testClientConnReadFrame[*MetaHeadersFrame](tc)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: hf.StreamID,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt.wantStatus(200)
+
+ // If this was a newly-accepted conn, read the SETTINGS ACK.
+ if wantConn {
+ tc.wantFrameType(FrameSettings) // ACK to our settings
+ }
+
+ tt.advance(test.wait)
+ if got, want := tc.netConnClosed, test.wantNewConn; got != want {
+ t.Fatalf("after waiting %v, conn closed=%v; want %v", test.wait, got, want)
+ }
+ }
+ })
+ }
+}
+
func TestTransportH2c(t *testing.T) {
l := startH2cServer(t)
defer l.Close()
@@ -740,53 +822,6 @@ func (fw flushWriter) Write(p []byte) (n int, err error) {
return
}
-type clientTester struct {
- t *testing.T
- tr *Transport
- sc, cc net.Conn // server and client conn
- fr *Framer // server's framer
- settings *SettingsFrame
- client func() error
- server func() error
-}
-
-func newClientTester(t *testing.T) *clientTester {
- var dialOnce struct {
- sync.Mutex
- dialed bool
- }
- ct := &clientTester{
- t: t,
- }
- ct.tr = &Transport{
- TLSClientConfig: tlsConfigInsecure,
- DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- dialOnce.Lock()
- defer dialOnce.Unlock()
- if dialOnce.dialed {
- return nil, errors.New("only one dial allowed in test mode")
- }
- dialOnce.dialed = true
- return ct.cc, nil
- },
- }
-
- ln := newLocalListener(t)
- cc, err := net.Dial("tcp", ln.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- sc, err := ln.Accept()
- if err != nil {
- t.Fatal(err)
- }
- ln.Close()
- ct.cc = cc
- ct.sc = sc
- ct.fr = NewFramer(sc, sc)
- return ct
-}
-
func newLocalListener(t *testing.T) net.Listener {
ln, err := net.Listen("tcp4", "127.0.0.1:0")
if err == nil {
@@ -799,284 +834,70 @@ func newLocalListener(t *testing.T) net.Listener {
return ln
}
-func (ct *clientTester) greet(settings ...Setting) {
- buf := make([]byte, len(ClientPreface))
- _, err := io.ReadFull(ct.sc, buf)
- if err != nil {
- ct.t.Fatalf("reading client preface: %v", err)
- }
- f, err := ct.fr.ReadFrame()
- if err != nil {
- ct.t.Fatalf("Reading client settings frame: %v", err)
- }
- var ok bool
- if ct.settings, ok = f.(*SettingsFrame); !ok {
- ct.t.Fatalf("Wanted client settings frame; got %v", f)
- }
- if err := ct.fr.WriteSettings(settings...); err != nil {
- ct.t.Fatal(err)
- }
- if err := ct.fr.WriteSettingsAck(); err != nil {
- ct.t.Fatal(err)
- }
-}
-
-func (ct *clientTester) readNonSettingsFrame() (Frame, error) {
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return nil, err
- }
- if _, ok := f.(*SettingsFrame); ok {
- continue
- }
- return f, nil
- }
-}
-
-// writeReadPing sends a PING and immediately reads the PING ACK.
-// It will fail if any other unread data was pending on the connection,
-// aside from SETTINGS frames.
-func (ct *clientTester) writeReadPing() error {
- data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
- if err := ct.fr.WritePing(false, data); err != nil {
- return fmt.Errorf("Error writing PING: %v", err)
- }
- f, err := ct.readNonSettingsFrame()
- if err != nil {
- return err
- }
- p, ok := f.(*PingFrame)
- if !ok {
- return fmt.Errorf("got a %v, want a PING ACK", f)
- }
- if p.Flags&FlagPingAck == 0 {
- return fmt.Errorf("got a PING, want a PING ACK")
- }
- if p.Data != data {
- return fmt.Errorf("got PING data = %x, want %x", p.Data, data)
- }
- return nil
-}
-
-func (ct *clientTester) inflowWindow(streamID uint32) int32 {
- pool := ct.tr.connPoolOrDef.(*clientConnPool)
- pool.mu.Lock()
- defer pool.mu.Unlock()
- if n := len(pool.keys); n != 1 {
- ct.t.Errorf("clientConnPool contains %v keys, expected 1", n)
- return -1
- }
- for cc := range pool.keys {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if streamID == 0 {
- return cc.inflow.avail + cc.inflow.unsent
- }
- cs := cc.streams[streamID]
- if cs == nil {
- ct.t.Errorf("no stream with id %v", streamID)
- return -1
- }
- return cs.inflow.avail + cs.inflow.unsent
- }
- return -1
-}
-
-func (ct *clientTester) cleanup() {
- ct.tr.CloseIdleConnections()
-
- // close both connections, ignore the error if its already closed
- ct.sc.Close()
- ct.cc.Close()
-}
-
-func (ct *clientTester) run() {
- var errOnce sync.Once
- var wg sync.WaitGroup
-
- run := func(which string, fn func() error) {
- defer wg.Done()
- if err := fn(); err != nil {
- errOnce.Do(func() {
- ct.t.Errorf("%s: %v", which, err)
- ct.cleanup()
- })
- }
- }
+func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) }
+func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) }
- wg.Add(2)
- go run("client", ct.client)
- go run("server", ct.server)
- wg.Wait()
+func testTransportReqBodyAfterResponse(t *testing.T, status int) {
+ const bodySize = 10 << 20
- errOnce.Do(ct.cleanup) // clean up if no error
-}
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ body := tc.newRequestBody()
+ body.writeBytes(bodySize / 2)
+ req, _ := http.NewRequest("PUT", "https://dummy.tld/", body)
+ rt := tc.roundTrip(req)
+
+ tc.wantHeaders(wantHeader{
+ streamID: rt.streamID(),
+ endStream: false,
+ header: http.Header{
+ ":authority": []string{"dummy.tld"},
+ ":method": []string{"PUT"},
+ ":path": []string{"/"},
+ },
+ })
-func (ct *clientTester) readFrame() (Frame, error) {
- return ct.fr.ReadFrame()
-}
+ // Provide enough congestion window for the full request body.
+ tc.writeWindowUpdate(0, bodySize)
+ tc.writeWindowUpdate(rt.streamID(), bodySize)
-func (ct *clientTester) firstHeaders() (*HeadersFrame, error) {
- for {
- f, err := ct.readFrame()
- if err != nil {
- return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
- }
- switch f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- continue
- }
- hf, ok := f.(*HeadersFrame)
- if !ok {
- return nil, fmt.Errorf("Got %T; want HeadersFrame", f)
- }
- return hf, nil
- }
-}
+ tc.wantData(wantData{
+ streamID: rt.streamID(),
+ endStream: false,
+ size: bodySize / 2,
+ })
-type countingReader struct {
- n *int64
-}
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", strconv.Itoa(status),
+ ),
+ })
-func (r countingReader) Read(p []byte) (n int, err error) {
- for i := range p {
- p[i] = byte(i)
+ res := rt.response()
+ if res.StatusCode != status {
+ t.Fatalf("status code = %v; want %v", res.StatusCode, status)
}
- atomic.AddInt64(r.n, int64(len(p)))
- return len(p), err
-}
-func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) }
-func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) }
+ body.writeBytes(bodySize / 2)
+ body.closeWithError(io.EOF)
-func testTransportReqBodyAfterResponse(t *testing.T, status int) {
- const bodySize = 10 << 20
- clientDone := make(chan struct{})
- ct := newClientTester(t)
- recvLen := make(chan int64, 1)
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- defer ct.cc.(*net.TCPConn).Close()
- }
- defer close(clientDone)
-
- body := &pipe{b: new(bytes.Buffer)}
- io.Copy(body, io.LimitReader(neverEnding('A'), bodySize/2))
- req, err := http.NewRequest("PUT", "https://dummy.tld/", body)
- if err != nil {
- return err
- }
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- if res.StatusCode != status {
- return fmt.Errorf("status code = %v; want %v", res.StatusCode, status)
- }
- io.Copy(body, io.LimitReader(neverEnding('A'), bodySize/2))
- body.CloseWithError(io.EOF)
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("Slurp: %v", err)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("unexpected body: %q", slurp)
- }
- res.Body.Close()
- if status == 200 {
- if got := <-recvLen; got != bodySize {
- return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize)
- }
- } else {
- if got := <-recvLen; got == 0 || got >= bodySize {
- return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize)
- }
- }
- return nil
+ if status == 200 {
+ // After a 200 response, client sends the remaining request body.
+ tc.wantData(wantData{
+ streamID: rt.streamID(),
+ endStream: true,
+ size: bodySize / 2,
+ })
+ } else {
+ // After a 403 response, client gives up and resets the stream.
+ tc.wantFrameType(FrameRSTStream)
}
- ct.server = func() error {
- ct.greet()
- defer close(recvLen)
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- var dataRecv int64
- var closed bool
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- select {
- case <-clientDone:
- // If the client's done, it
- // will have reported any
- // errors on its side.
- return nil
- default:
- return err
- }
- }
- //println(fmt.Sprintf("server got frame: %v", f))
- ended := false
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- if !f.HeadersEnded() {
- return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
- }
- if f.StreamEnded() {
- return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f)
- }
- case *DataFrame:
- dataLen := len(f.Data())
- if dataLen > 0 {
- if dataRecv == 0 {
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- }
- if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
- return err
- }
- if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
- return err
- }
- }
- dataRecv += int64(dataLen)
-
- if !closed && ((status != 200 && dataRecv > 0) ||
- (status == 200 && f.StreamEnded())) {
- closed = true
- if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil {
- return err
- }
- }
- if f.StreamEnded() {
- ended = true
- }
- case *RSTStreamFrame:
- if status == 200 {
- return fmt.Errorf("Unexpected client frame %v", f)
- }
- ended = true
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
- }
- if ended {
- select {
- case recvLen <- dataRecv:
- default:
- }
- }
- }
- }
- ct.run()
+ rt.wantBody(nil)
}
// See golang.org/issue/13444
@@ -1257,121 +1078,74 @@ func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerTy
panic("invalid combination")
}
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody))
- if expect100Continue != noHeader {
- req.Header.Set("Expect", "100-continue")
- }
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return fmt.Errorf("status code = %v; want 200", res.StatusCode)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("Slurp: %v", err)
- }
- wantBody := resBody
- if !withData {
- wantBody = ""
- }
- if string(slurp) != wantBody {
- return fmt.Errorf("body = %q; want %q", slurp, wantBody)
- }
- if trailers == noHeader {
- if len(res.Trailer) > 0 {
- t.Errorf("Trailer = %v; want none", res.Trailer)
- }
- } else {
- want := http.Header{"Some-Trailer": {"some-value"}}
- if !reflect.DeepEqual(res.Trailer, want) {
- t.Errorf("Trailer = %v; want %v", res.Trailer, want)
- }
- }
- return nil
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody))
+ if expect100Continue != noHeader {
+ req.Header.Set("Expect", "100-continue")
}
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
+ rt := tc.roundTrip(req)
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- endStream := false
- send := func(mode headerType) {
- hbf := buf.Bytes()
- switch mode {
- case oneHeader:
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.Header().StreamID,
- EndHeaders: true,
- EndStream: endStream,
- BlockFragment: hbf,
- })
- case splitHeader:
- if len(hbf) < 2 {
- panic("too small")
- }
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.Header().StreamID,
- EndHeaders: false,
- EndStream: endStream,
- BlockFragment: hbf[:1],
- })
- ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:])
- default:
- panic("bogus mode")
- }
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *DataFrame:
- if !f.StreamEnded() {
- // No need to send flow control tokens. The test request body is tiny.
- continue
- }
- // Response headers (1+ frames; 1 or 2 in this test, but never 0)
- {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"})
- enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"})
- if trailers != noHeader {
- enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"})
- }
- endStream = withData == false && trailers == noHeader
- send(resHeader)
- }
- if withData {
- endStream = trailers == noHeader
- ct.fr.WriteData(f.StreamID, endStream, []byte(resBody))
- }
- if trailers != noHeader {
- endStream = true
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"})
- send(trailers)
- }
- if endStream {
- return nil
- }
- case *HeadersFrame:
- if expect100Continue != noHeader {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
- send(expect100Continue)
- }
- }
- }
+ tc.wantFrameType(FrameHeaders)
+
+ // Possibly 100-continue, or skip when noHeader.
+ tc.writeHeadersMode(expect100Continue, HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "100",
+ ),
+ })
+
+ // Client sends request body.
+ tc.wantData(wantData{
+ streamID: rt.streamID(),
+ endStream: true,
+ size: len(reqBody),
+ })
+
+ hdr := []string{
+ ":status", "200",
+ "x-foo", "blah",
+ "x-bar", "more",
+ }
+ if trailers != noHeader {
+ hdr = append(hdr, "trailer", "some-trailer")
+ }
+ tc.writeHeadersMode(resHeader, HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: withData == false && trailers == noHeader,
+ BlockFragment: tc.makeHeaderBlockFragment(hdr...),
+ })
+ if withData {
+ endStream := trailers == noHeader
+ tc.writeData(rt.streamID(), endStream, []byte(resBody))
+ }
+ tc.writeHeadersMode(trailers, HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ "some-trailer", "some-value",
+ ),
+ })
+
+ rt.wantStatus(200)
+ if !withData {
+ rt.wantBody(nil)
+ } else {
+ rt.wantBody([]byte(resBody))
+ }
+ if trailers == noHeader {
+ rt.wantTrailers(nil)
+ } else {
+ rt.wantTrailers(http.Header{
+ "Some-Trailer": {"some-value"},
+ })
}
- ct.run()
}
// Issue 26189, Issue 17739: ignore unknown 1xx responses
@@ -1383,130 +1157,76 @@ func TestTransportUnknown1xx(t *testing.T) {
return nil
}
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 204 {
- return fmt.Errorf("status code = %v; want 204", res.StatusCode)
- }
- want := `code=110 header=map[Foo-Bar:[110]]
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ for i := 110; i <= 114; i++ {
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", fmt.Sprint(i),
+ "foo-bar", fmt.Sprint(i),
+ ),
+ })
+ }
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "204",
+ ),
+ })
+
+ res := rt.response()
+ if res.StatusCode != 204 {
+ t.Fatalf("status code = %v; want 204", res.StatusCode)
+ }
+ want := `code=110 header=map[Foo-Bar:[110]]
code=111 header=map[Foo-Bar:[111]]
code=112 header=map[Foo-Bar:[112]]
code=113 header=map[Foo-Bar:[113]]
code=114 header=map[Foo-Bar:[114]]
`
- if got := buf.String(); got != want {
- t.Errorf("Got trace:\n%s\nWant:\n%s", got, want)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
-
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- for i := 110; i <= 114; i++ {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: fmt.Sprint(i)})
- enc.WriteField(hpack.HeaderField{Name: "foo-bar", Value: fmt.Sprint(i)})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- }
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- return nil
- }
- }
+ if got := buf.String(); got != want {
+ t.Errorf("Got trace:\n%s\nWant:\n%s", got, want)
}
- ct.run()
-
}
func TestTransportReceiveUndeclaredTrailer(t *testing.T) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return fmt.Errorf("status code = %v; want 200", res.StatusCode)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("body = %q; want nothing", slurp)
- }
- if _, ok := res.Trailer["Some-Trailer"]; !ok {
- return fmt.Errorf("expected Some-Trailer")
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
-
- var n int
- var hf *HeadersFrame
- for hf == nil && n < 10 {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- hf, _ = f.(*HeadersFrame)
- n++
- }
-
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
-
- // send headers without Trailer header
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ "some-trailer", "I'm an undeclared Trailer!",
+ ),
+ })
- // send trailers
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- return nil
- }
- ct.run()
+ rt.wantStatus(200)
+ rt.wantBody(nil)
+ rt.wantTrailers(http.Header{
+ "Some-Trailer": []string{"I'm an undeclared Trailer!"},
+ })
}
func TestTransportInvalidTrailer_Pseudo1(t *testing.T) {
@@ -1516,10 +1236,10 @@ func TestTransportInvalidTrailer_Pseudo2(t *testing.T) {
testTransportInvalidTrailer_Pseudo(t, splitHeader)
}
func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) {
- testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"})
- enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
- })
+ testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"),
+ ":colon", "foo",
+ "foo", "bar",
+ )
}
func TestTransportInvalidTrailer_Capital1(t *testing.T) {
@@ -1529,102 +1249,54 @@ func TestTransportInvalidTrailer_Capital2(t *testing.T) {
testTransportInvalidTrailer_Capital(t, splitHeader)
}
func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) {
- testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
- enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"})
- })
+ testInvalidTrailer(t, trailers, headerFieldNameError("Capital"),
+ "foo", "bar",
+ "Capital", "bad",
+ )
}
func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) {
- testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: "", Value: "bad"})
- })
+ testInvalidTrailer(t, oneHeader, headerFieldNameError(""),
+ "", "bad",
+ )
}
func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) {
- testInvalidTrailer(t, oneHeader, headerFieldValueError("x"), func(enc *hpack.Encoder) {
- enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"})
- })
+ testInvalidTrailer(t, oneHeader, headerFieldValueError("x"),
+ "x", "has\nnewline",
+ )
}
-func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return fmt.Errorf("status code = %v; want 200", res.StatusCode)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- se, ok := err.(StreamError)
- if !ok || se.Cause != wantErr {
- return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("body = %q; want nothing", slurp)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
+func testInvalidTrailer(t *testing.T, mode headerType, wantErr error, trailers ...string) {
+ tc := newTestClientConn(t)
+ tc.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- var endStream bool
- send := func(mode headerType) {
- hbf := buf.Bytes()
- switch mode {
- case oneHeader:
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: endStream,
- BlockFragment: hbf,
- })
- case splitHeader:
- if len(hbf) < 2 {
- panic("too small")
- }
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: false,
- EndStream: endStream,
- BlockFragment: hbf[:1],
- })
- ct.fr.WriteContinuation(f.StreamID, true, hbf[1:])
- default:
- panic("bogus mode")
- }
- }
- // Response headers (1+ frames; 1 or 2 in this test, but never 0)
- {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"})
- endStream = false
- send(oneHeader)
- }
- // Trailers:
- {
- endStream = true
- buf.Reset()
- writeTrailer(enc)
- send(trailers)
- }
- return nil
- }
- }
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ "trailer", "declared",
+ ),
+ })
+ tc.writeHeadersMode(mode, HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(trailers...),
+ })
+
+ rt.wantStatus(200)
+ body, err := rt.readBody()
+ se, ok := err.(StreamError)
+ if !ok || se.Cause != wantErr {
+ t.Fatalf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", body, err, wantErr, wantErr)
+ }
+ if len(body) > 0 {
+ t.Fatalf("body = %q; want nothing", body)
}
- ct.run()
}
// headerListSize returns the HTTP2 header list size of h.
@@ -1900,115 +1572,80 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) {
}
func TestTransportChecksResponseHeaderListSize(t *testing.T) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if e, ok := err.(StreamError); ok {
- err = e.Cause
- }
- if err != errResponseHeaderListSize {
- size := int64(0)
- if res != nil {
- res.Body.Close()
- for k, vv := range res.Header {
- for _, v := range vv {
- size += int64(len(k)) + int64(len(v)) + 32
- }
- }
- }
- return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+
+ hdr := []string{":status", "200"}
+ large := strings.Repeat("a", 1<<10)
+ for i := 0; i < 5042; i++ {
+ hdr = append(hdr, large, large)
+ }
+ hbf := tc.makeHeaderBlockFragment(hdr...)
+ // Note: this number might change if our hpack implementation changes.
+ // That's fine. This is just a sanity check that our response can fit in a single
+ // header block fragment frame.
+ if size, want := len(hbf), 6329; size != want {
+ t.Fatalf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want)
+ }
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: hbf,
+ })
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- large := strings.Repeat("a", 1<<10)
- for i := 0; i < 5042; i++ {
- enc.WriteField(hpack.HeaderField{Name: large, Value: large})
- }
- if size, want := buf.Len(), 6329; size != want {
- // Note: this number might change if
- // our hpack implementation
- // changes. That's fine. This is
- // just a sanity check that our
- // response can fit in a single
- // header block fragment frame.
- return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want)
+ res, err := rt.result()
+ if e, ok := err.(StreamError); ok {
+ err = e.Cause
+ }
+ if err != errResponseHeaderListSize {
+ size := int64(0)
+ if res != nil {
+ res.Body.Close()
+ for k, vv := range res.Header {
+ for _, v := range vv {
+ size += int64(len(k)) + int64(len(v)) + 32
}
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- return nil
}
}
+ t.Fatalf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size)
}
- ct.run()
}
func TestTransportCookieHeaderSplit(t *testing.T) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- req.Header.Add("Cookie", "a=b;c=d; e=f;")
- req.Header.Add("Cookie", "e=f;g=h; ")
- req.Header.Add("Cookie", "i=j")
- _, err := ct.tr.RoundTrip(req)
- return err
- }
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- dec := hpack.NewDecoder(initialHeaderTableSize, nil)
- hfs, err := dec.DecodeFull(f.HeaderBlockFragment())
- if err != nil {
- return err
- }
- got := []string{}
- want := []string{"a=b", "c=d", "e=f", "e=f", "g=h", "i=j"}
- for _, hf := range hfs {
- if hf.Name == "cookie" {
- got = append(got, hf.Value)
- }
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Cookies = %#v, want %#v", got, want)
- }
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ req.Header.Add("Cookie", "a=b;c=d; e=f;")
+ req.Header.Add("Cookie", "e=f;g=h; ")
+ req.Header.Add("Cookie", "i=j")
+ rt := tc.roundTrip(req)
+
+ tc.wantHeaders(wantHeader{
+ streamID: rt.streamID(),
+ endStream: true,
+ header: http.Header{
+ "cookie": []string{"a=b", "c=d", "e=f", "e=f", "g=h", "i=j"},
+ },
+ })
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "204",
+ ),
+ })
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- return nil
- }
- }
+ if err := rt.err(); err != nil {
+ t.Fatalf("RoundTrip = %v, want success", err)
}
- ct.run()
}
// Test that the Transport returns a typed error from Response.Body.Read calls
@@ -2224,55 +1861,49 @@ func TestTransportResponseHeaderTimeout_Body(t *testing.T) {
}
func testTransportResponseHeaderTimeout(t *testing.T, body bool) {
- ct := newClientTester(t)
- ct.tr.t1 = &http.Transport{
- ResponseHeaderTimeout: 5 * time.Millisecond,
- }
- ct.client = func() error {
- c := &http.Client{Transport: ct.tr}
- var err error
- var n int64
- const bodySize = 4 << 20
- if body {
- _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize))
- } else {
- _, err = c.Get("https://dummy.tld/")
- }
- if !isTimeout(err) {
- t.Errorf("client expected timeout error; got %#v", err)
+ const bodySize = 4 << 20
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.t1 = &http.Transport{
+ ResponseHeaderTimeout: 5 * time.Millisecond,
}
- if body && n != bodySize {
- t.Errorf("only read %d bytes of body; want %d", n, bodySize)
- }
- return nil
+ })
+ tc.greet()
+
+ var req *http.Request
+ var reqBody *testRequestBody
+ if body {
+ reqBody = tc.newRequestBody()
+ reqBody.writeBytes(bodySize)
+ reqBody.closeWithError(io.EOF)
+ req, _ = http.NewRequest("POST", "https://dummy.tld/", reqBody)
+ req.Header.Set("Content-Type", "text/foo")
+ } else {
+ req, _ = http.NewRequest("GET", "https://dummy.tld/", nil)
}
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- t.Logf("ReadFrame: %v", err)
- return nil
- }
- switch f := f.(type) {
- case *DataFrame:
- dataLen := len(f.Data())
- if dataLen > 0 {
- if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil {
- return err
- }
- if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil {
- return err
- }
- }
- case *RSTStreamFrame:
- if f.StreamID == 1 && f.ErrCode == ErrCodeCancel {
- return nil
- }
- }
- }
+
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+
+ tc.writeWindowUpdate(0, bodySize)
+ tc.writeWindowUpdate(rt.streamID(), bodySize)
+
+ if body {
+ tc.wantData(wantData{
+ endStream: true,
+ size: bodySize,
+ })
+ }
+
+ tc.advance(4 * time.Millisecond)
+ if rt.done() {
+ t.Fatalf("RoundTrip is done after 4ms; want still waiting")
+ }
+ tc.advance(1 * time.Millisecond)
+
+ if err := rt.err(); !isTimeout(err) {
+ t.Fatalf("RoundTrip error: %v; want timeout error", err)
}
- ct.run()
}
func TestTransportDisableCompression(t *testing.T) {
@@ -2484,7 +2115,8 @@ func TestTransportRejectsContentLengthWithSign(t *testing.T) {
}
// golang.org/issue/14048
-func TestTransportFailsOnInvalidHeaders(t *testing.T) {
+// golang.org/issue/64766
+func TestTransportFailsOnInvalidHeadersAndTrailers(t *testing.T) {
st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
var got []string
for k := range r.Header {
@@ -2497,6 +2129,7 @@ func TestTransportFailsOnInvalidHeaders(t *testing.T) {
tests := [...]struct {
h http.Header
+ t http.Header
wantErr string
}{
0: {
@@ -2515,6 +2148,14 @@ func TestTransportFailsOnInvalidHeaders(t *testing.T) {
h: http.Header{"foo": {"foo\x01bar"}},
wantErr: `invalid HTTP header value for header "foo"`,
},
+ 4: {
+ t: http.Header{"foo": {"foo\x01bar"}},
+ wantErr: `invalid HTTP trailer value for header "foo"`,
+ },
+ 5: {
+ t: http.Header{"x-\r\nda": {"foo\x01bar"}},
+ wantErr: `invalid HTTP trailer name "x-\r\nda"`,
+ },
}
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
@@ -2523,6 +2164,7 @@ func TestTransportFailsOnInvalidHeaders(t *testing.T) {
for i, tt := range tests {
req, _ := http.NewRequest("GET", st.ts.URL, nil)
req.Header = tt.h
+ req.Trailer = tt.t
res, err := tr.RoundTrip(req)
var bad bool
if tt.wantErr == "" {
@@ -2658,115 +2300,61 @@ func TestTransportNewTLSConfig(t *testing.T) {
// without END_STREAM, followed by a 0-length DATA frame with
// END_STREAM. Make sure we don't get confused by that. (We did.)
func TestTransportReadHeadResponse(t *testing.T) {
- ct := newClientTester(t)
- clientDone := make(chan struct{})
- ct.client = func() error {
- defer close(clientDone)
- req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return err
- }
- if res.ContentLength != 123 {
- return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("ReadAll: %v", err)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- t.Logf("ReadFrame: %v", err)
- return nil
- }
- hf, ok := f.(*HeadersFrame)
- if !ok {
- continue
- }
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false, // as the GFE does
- BlockFragment: buf.Bytes(),
- })
- ct.fr.WriteData(hf.StreamID, true, nil)
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false, // as the GFE does
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ "content-length", "123",
+ ),
+ })
+ tc.writeData(rt.streamID(), true, nil)
- <-clientDone
- return nil
- }
+ res := rt.response()
+ if res.ContentLength != 123 {
+ t.Fatalf("Content-Length = %d; want 123", res.ContentLength)
}
- ct.run()
+ rt.wantBody(nil)
}
func TestTransportReadHeadResponseWithBody(t *testing.T) {
- // This test use not valid response format.
- // Discarding logger output to not spam tests output.
- log.SetOutput(ioutil.Discard)
+ // This test uses an invalid response format.
+ // Discard logger output to not spam tests output.
+ log.SetOutput(io.Discard)
defer log.SetOutput(os.Stderr)
response := "redirecting to /elsewhere"
- ct := newClientTester(t)
- clientDone := make(chan struct{})
- ct.client = func() error {
- defer close(clientDone)
- req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return err
- }
- if res.ContentLength != int64(len(response)) {
- return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response))
- }
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return fmt.Errorf("ReadAll: %v", err)
- }
- if len(slurp) > 0 {
- return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- t.Logf("ReadFrame: %v", err)
- return nil
- }
- hf, ok := f.(*HeadersFrame)
- if !ok {
- continue
- }
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- ct.fr.WriteData(hf.StreamID, true, []byte(response))
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ "content-length", strconv.Itoa(len(response)),
+ ),
+ })
+ tc.writeData(rt.streamID(), true, []byte(response))
- <-clientDone
- return nil
- }
+ res := rt.response()
+ if res.ContentLength != int64(len(response)) {
+ t.Fatalf("Content-Length = %d; want %d", res.ContentLength, len(response))
}
- ct.run()
+ rt.wantBody(nil)
}
type neverEnding byte
@@ -2891,190 +2479,125 @@ func TestTransportUsesGoAwayDebugError_Body(t *testing.T) {
}
func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) {
- ct := newClientTester(t)
- clientDone := make(chan struct{})
+ tc := newTestClientConn(t)
+ tc.greet()
const goAwayErrCode = ErrCodeHTTP11Required // arbitrary
const goAwayDebugData = "some debug data"
- ct.client = func() error {
- defer close(clientDone)
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if failMidBody {
- if err != nil {
- return fmt.Errorf("unexpected client RoundTrip error: %v", err)
- }
- _, err = io.Copy(ioutil.Discard, res.Body)
- res.Body.Close()
- }
- want := GoAwayError{
- LastStreamID: 5,
- ErrCode: goAwayErrCode,
- DebugData: goAwayDebugData,
- }
- if !reflect.DeepEqual(err, want) {
- t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- t.Logf("ReadFrame: %v", err)
- return nil
- }
- hf, ok := f.(*HeadersFrame)
- if !ok {
- continue
- }
- if failMidBody {
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- }
- // Write two GOAWAY frames, to test that the Transport takes
- // the interesting parts of both.
- ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData))
- ct.fr.WriteGoAway(5, goAwayErrCode, nil)
- ct.sc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- ct.sc.(*net.TCPConn).Close()
- }
- <-clientDone
- return nil
- }
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+
+ if failMidBody {
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ "content-length", "123",
+ ),
+ })
}
- ct.run()
-}
-func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) {
- ct := newClientTester(t)
+ // Write two GOAWAY frames, to test that the Transport takes
+ // the interesting parts of both.
+ tc.writeGoAway(5, ErrCodeNo, []byte(goAwayDebugData))
+ tc.writeGoAway(5, goAwayErrCode, nil)
+ tc.closeWrite(io.EOF)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
+ res, err := rt.result()
+ whence := "RoundTrip"
+ if failMidBody {
+ whence = "Body.Read"
if err != nil {
- return err
- }
-
- if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 {
- return fmt.Errorf("body read = %v, %v; want 1, nil", n, err)
+ t.Fatalf("RoundTrip error = %v, want success", err)
}
- res.Body.Close() // leaving 4999 bytes unread
+ _, err = res.Body.Read(make([]byte, 1))
+ }
- return nil
+ want := GoAwayError{
+ LastStreamID: 5,
+ ErrCode: goAwayErrCode,
+ DebugData: goAwayDebugData,
+ }
+ if !reflect.DeepEqual(err, want) {
+ t.Errorf("%v error = %T: %#v, want %T (%#v)", whence, err, err, want, want)
}
- ct.server = func() error {
- ct.greet()
+}
- var hf *HeadersFrame
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
- }
- switch f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- continue
- }
- var ok bool
- hf, ok = f.(*HeadersFrame)
- if !ok {
- return fmt.Errorf("Got %T; want HeadersFrame", f)
- }
- break
- }
+func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) {
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ "content-length", "5000",
+ ),
+ })
+ initialInflow := tc.inflowWindow(0)
+
+ // Two cases:
+ // - Send one DATA frame with 5000 bytes.
+ // - Send two DATA frames with 1 and 4999 bytes each.
+ //
+ // In both cases, the client should consume one byte of data,
+ // refund that byte, then refund the following 4999 bytes.
+ //
+ // In the second case, the server waits for the client to reset the
+ // stream before sending the second DATA frame. This tests the case
+ // where the client receives a DATA frame after it has reset the stream.
+ const streamNotEnded = false
+ if oneDataFrame {
+ tc.writeData(rt.streamID(), streamNotEnded, make([]byte, 5000))
+ } else {
+ tc.writeData(rt.streamID(), streamNotEnded, make([]byte, 1))
+ }
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- initialInflow := ct.inflowWindow(0)
-
- // Two cases:
- // - Send one DATA frame with 5000 bytes.
- // - Send two DATA frames with 1 and 4999 bytes each.
- //
- // In both cases, the client should consume one byte of data,
- // refund that byte, then refund the following 4999 bytes.
- //
- // In the second case, the server waits for the client to reset the
- // stream before sending the second DATA frame. This tests the case
- // where the client receives a DATA frame after it has reset the stream.
- if oneDataFrame {
- ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000))
- } else {
- ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1))
- }
+ res := rt.response()
+ if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 {
+ t.Fatalf("body read = %v, %v; want 1, nil", n, err)
+ }
+ res.Body.Close() // leaving 4999 bytes unread
+ tc.sync()
- wantRST := true
- wantWUF := true
- if !oneDataFrame {
- wantWUF = false // flow control update is small, and will not be sent
- }
- for wantRST || wantWUF {
- f, err := ct.readNonSettingsFrame()
- if err != nil {
- return err
+ sentAdditionalData := false
+ tc.wantUnorderedFrames(
+ func(f *RSTStreamFrame) bool {
+ if f.ErrCode != ErrCodeCancel {
+ t.Fatalf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f))
}
- switch f := f.(type) {
- case *RSTStreamFrame:
- if !wantRST {
- return fmt.Errorf("Unexpected frame: %v", summarizeFrame(f))
- }
- if f.ErrCode != ErrCodeCancel {
- return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f))
- }
- wantRST = false
- case *WindowUpdateFrame:
- if !wantWUF {
- return fmt.Errorf("Unexpected frame: %v", summarizeFrame(f))
- }
- if f.Increment != 5000 {
- return fmt.Errorf("Expected WindowUpdateFrames for 5000 bytes; got %v", summarizeFrame(f))
- }
- wantWUF = false
- default:
- return fmt.Errorf("Unexpected frame: %v", summarizeFrame(f))
+ if !oneDataFrame {
+ // Send the remaining data now.
+ tc.writeData(rt.streamID(), streamNotEnded, make([]byte, 4999))
+ sentAdditionalData = true
}
- }
- if !oneDataFrame {
- ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999))
- f, err := ct.readNonSettingsFrame()
- if err != nil {
- return err
+ return true
+ },
+ func(f *WindowUpdateFrame) bool {
+ if !oneDataFrame && !sentAdditionalData {
+ t.Fatalf("Got WindowUpdateFrame, don't expect one yet")
}
- wuf, ok := f.(*WindowUpdateFrame)
- if !ok || wuf.Increment != 5000 {
- return fmt.Errorf("want WindowUpdateFrame for 5000 bytes; got %v", summarizeFrame(f))
+ if f.Increment != 5000 {
+ t.Fatalf("Expected WindowUpdateFrames for 5000 bytes; got %v", summarizeFrame(f))
}
- }
- if err := ct.writeReadPing(); err != nil {
- return err
- }
- if got, want := ct.inflowWindow(0), initialInflow; got != want {
- return fmt.Errorf("connection flow tokens = %v, want %v", got, want)
- }
- return nil
+ return true
+ },
+ )
+
+ if got, want := tc.inflowWindow(0), initialInflow; got != want {
+ t.Fatalf("connection flow tokens = %v, want %v", got, want)
}
- ct.run()
}
// See golang.org/issue/16481
@@ -3090,199 +2613,124 @@ func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) {
// Issue 16612: adjust flow control on open streams when transport
// receives SETTINGS with INITIAL_WINDOW_SIZE from server.
func TestTransportAdjustsFlowControl(t *testing.T) {
- ct := newClientTester(t)
- clientDone := make(chan struct{})
-
const bodySize = 1 << 20
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- defer ct.cc.(*net.TCPConn).Close()
- }
- defer close(clientDone)
+ tc := newTestClientConn(t)
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ // Don't write our SETTINGS yet.
- req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)})
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return err
+ body := tc.newRequestBody()
+ body.writeBytes(bodySize)
+ body.closeWithError(io.EOF)
+
+ req, _ := http.NewRequest("POST", "https://dummy.tld/", body)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+
+ gotBytes := int64(0)
+ for {
+ f := testClientConnReadFrame[*DataFrame](tc)
+ gotBytes += int64(len(f.Data()))
+ // After we've got half the client's initial flow control window's worth
+ // of request body data, give it just enough flow control to finish.
+ if gotBytes >= initialWindowSize/2 {
+ break
}
- res.Body.Close()
- return nil
}
- ct.server = func() error {
- _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface)))
- if err != nil {
- return fmt.Errorf("reading client preface: %v", err)
- }
- var gotBytes int64
- var sentSettings bool
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- select {
- case <-clientDone:
- return nil
- default:
- return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
- }
- }
- switch f := f.(type) {
- case *DataFrame:
- gotBytes += int64(len(f.Data()))
- // After we've got half the client's
- // initial flow control window's worth
- // of request body data, give it just
- // enough flow control to finish.
- if gotBytes >= initialWindowSize/2 && !sentSettings {
- sentSettings = true
-
- ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize})
- ct.fr.WriteWindowUpdate(0, bodySize)
- ct.fr.WriteSettingsAck()
- }
+ tc.writeSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize})
+ tc.writeWindowUpdate(0, bodySize)
+ tc.writeSettingsAck()
- if f.StreamEnded() {
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- }
- }
- }
+ tc.wantUnorderedFrames(
+ func(f *SettingsFrame) bool { return true },
+ func(f *DataFrame) bool {
+ gotBytes += int64(len(f.Data()))
+ return f.StreamEnded()
+ },
+ )
+
+ if gotBytes != bodySize {
+ t.Fatalf("server received %v bytes of body, want %v", gotBytes, bodySize)
}
- ct.run()
+
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt.wantStatus(200)
}
// See golang.org/issue/16556
func TestTransportReturnsDataPaddingFlowControl(t *testing.T) {
- ct := newClientTester(t)
-
- unblockClient := make(chan bool, 1)
-
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return err
- }
- defer res.Body.Close()
- <-unblockClient
- return nil
- }
- ct.server = func() error {
- ct.greet()
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ "content-length", "5000",
+ ),
+ })
- var hf *HeadersFrame
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
- }
- switch f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- continue
- }
- var ok bool
- hf, ok = f.(*HeadersFrame)
- if !ok {
- return fmt.Errorf("Got %T; want HeadersFrame", f)
- }
- break
- }
+ initialConnWindow := tc.inflowWindow(0)
+ initialStreamWindow := tc.inflowWindow(rt.streamID())
- initialConnWindow := ct.inflowWindow(0)
+ pad := make([]byte, 5)
+ tc.writeDataPadded(rt.streamID(), false, make([]byte, 5000), pad)
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- initialStreamWindow := ct.inflowWindow(hf.StreamID)
- pad := make([]byte, 5)
- ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream
- if err := ct.writeReadPing(); err != nil {
- return err
- }
- // Padding flow control should have been returned.
- if got, want := ct.inflowWindow(0), initialConnWindow-5000; got != want {
- t.Errorf("conn inflow window = %v, want %v", got, want)
- }
- if got, want := ct.inflowWindow(hf.StreamID), initialStreamWindow-5000; got != want {
- t.Errorf("stream inflow window = %v, want %v", got, want)
- }
- unblockClient <- true
- return nil
+ // Padding flow control should have been returned.
+ if got, want := tc.inflowWindow(0), initialConnWindow-5000; got != want {
+ t.Errorf("conn inflow window = %v, want %v", got, want)
+ }
+ if got, want := tc.inflowWindow(rt.streamID()), initialStreamWindow-5000; got != want {
+ t.Errorf("stream inflow window = %v, want %v", got, want)
}
- ct.run()
}
// golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a
// StreamError as a result of the response HEADERS
func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) {
- ct := newClientTester(t)
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ " content-type", "bogus",
+ ),
+ })
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err == nil {
- res.Body.Close()
- return errors.New("unexpected successful GET")
- }
- want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")}
- if !reflect.DeepEqual(want, err) {
- t.Errorf("RoundTrip error = %#v; want %#v", err, want)
- }
- return nil
+ err := rt.err()
+ want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")}
+ if !reflect.DeepEqual(err, want) {
+ t.Fatalf("RoundTrip error = %#v; want %#v", err, want)
}
- ct.server = func() error {
- ct.greet()
- hf, err := ct.firstHeaders()
- if err != nil {
- return err
- }
-
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
-
- for {
- fr, err := ct.readFrame()
- if err != nil {
- return fmt.Errorf("error waiting for RST_STREAM from client: %v", err)
- }
- if _, ok := fr.(*SettingsFrame); ok {
- continue
- }
- if rst, ok := fr.(*RSTStreamFrame); !ok || rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol {
- t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr))
- }
- break
- }
-
- return nil
+ fr := testClientConnReadFrame[*RSTStreamFrame](tc)
+ if fr.StreamID != 1 || fr.ErrCode != ErrCodeProtocol {
+ t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr))
}
- ct.run()
}
// byteAndEOFReader returns is in an io.Reader which reads one byte
@@ -3576,26 +3024,24 @@ func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) {
}
func TestTransportCloseAfterLostPing(t *testing.T) {
- clientDone := make(chan struct{})
- ct := newClientTester(t)
- ct.tr.PingTimeout = 1 * time.Second
- ct.tr.ReadIdleTimeout = 1 * time.Second
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- defer close(clientDone)
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- _, err := ct.tr.RoundTrip(req)
- if err == nil || !strings.Contains(err.Error(), "client connection lost") {
- return fmt.Errorf("expected to get error about \"connection lost\", got %v", err)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- <-clientDone
- return nil
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.PingTimeout = 1 * time.Second
+ tr.ReadIdleTimeout = 1 * time.Second
+ })
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+ tc.wantFrameType(FrameHeaders)
+
+ tc.advance(1 * time.Second)
+ tc.wantFrameType(FramePing)
+
+ tc.advance(1 * time.Second)
+ err := rt.err()
+ if err == nil || !strings.Contains(err.Error(), "client connection lost") {
+ t.Fatalf("expected to get error about \"connection lost\", got %v", err)
}
- ct.run()
}
func TestTransportPingWriteBlocks(t *testing.T) {
@@ -3628,418 +3074,231 @@ func TestTransportPingWriteBlocks(t *testing.T) {
}
}
-func TestTransportPingWhenReading(t *testing.T) {
- testCases := []struct {
- name string
- readIdleTimeout time.Duration
- deadline time.Duration
- expectedPingCount int
- }{
- {
- name: "two pings",
- readIdleTimeout: 100 * time.Millisecond,
- deadline: time.Second,
- expectedPingCount: 2,
- },
- {
- name: "zero ping",
- readIdleTimeout: time.Second,
- deadline: 200 * time.Millisecond,
- expectedPingCount: 0,
- },
- {
- name: "0 readIdleTimeout means no ping",
- readIdleTimeout: 0 * time.Millisecond,
- deadline: 500 * time.Millisecond,
- expectedPingCount: 0,
- },
- }
-
- for _, tc := range testCases {
- tc := tc // capture range variable
- t.Run(tc.name, func(t *testing.T) {
- testTransportPingWhenReading(t, tc.readIdleTimeout, tc.deadline, tc.expectedPingCount)
- })
- }
-}
+func TestTransportPingWhenReadingMultiplePings(t *testing.T) {
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.ReadIdleTimeout = 1000 * time.Millisecond
+ })
+ tc.greet()
-func testTransportPingWhenReading(t *testing.T, readIdleTimeout, deadline time.Duration, expectedPingCount int) {
- var pingCount int
- ct := newClientTester(t)
- ct.tr.ReadIdleTimeout = readIdleTimeout
+ ctx, cancel := context.WithCancel(context.Background())
+ req, _ := http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
- ctx, cancel := context.WithTimeout(context.Background(), deadline)
- defer cancel()
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- defer ct.cc.(*net.TCPConn).Close()
- }
- req, _ := http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip: %v", err)
- }
- defer res.Body.Close()
- if res.StatusCode != 200 {
- return fmt.Errorf("status code = %v; want %v", res.StatusCode, 200)
- }
- _, err = ioutil.ReadAll(res.Body)
- if expectedPingCount == 0 && errors.Is(ctx.Err(), context.DeadlineExceeded) {
- return nil
+ for i := 0; i < 5; i++ {
+ // No ping yet...
+ tc.advance(999 * time.Millisecond)
+ if f := tc.readFrame(); f != nil {
+ t.Fatalf("unexpected frame: %v", f)
}
- cancel()
- return err
+ // ...ping now.
+ tc.advance(1 * time.Millisecond)
+ f := testClientConnReadFrame[*PingFrame](tc)
+ tc.writePing(true, f.Data)
}
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- var streamID uint32
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- select {
- case <-ctx.Done():
- // If the client's done, it
- // will have reported any
- // errors on its side.
- return nil
- default:
- return err
- }
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- if !f.HeadersEnded() {
- return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
- }
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(200)})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- streamID = f.StreamID
- case *PingFrame:
- pingCount++
- if pingCount == expectedPingCount {
- if err := ct.fr.WriteData(streamID, true, []byte("hello, this is last server data frame")); err != nil {
- return err
- }
- }
- if err := ct.fr.WritePing(true, f.Data); err != nil {
- return err
- }
- case *RSTStreamFrame:
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
- }
- }
+ // Cancel the request, Transport resets it and returns an error from body reads.
+ cancel()
+ tc.sync()
+
+ tc.wantFrameType(FrameRSTStream)
+ _, err := rt.readBody()
+ if err == nil {
+ t.Fatalf("Response.Body.Read() = %v, want error", err)
}
- ct.run()
}
-func testClientMultipleDials(t *testing.T, client func(*Transport), server func(int, *clientTester)) {
- ln := newLocalListener(t)
- defer ln.Close()
-
- var (
- mu sync.Mutex
- count int
- conns []net.Conn
- )
- var wg sync.WaitGroup
- tr := &Transport{
- TLSClientConfig: tlsConfigInsecure,
- }
- tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) {
- mu.Lock()
- defer mu.Unlock()
- count++
- cc, err := net.Dial("tcp", ln.Addr().String())
- if err != nil {
- return nil, fmt.Errorf("dial error: %v", err)
- }
- conns = append(conns, cc)
- sc, err := ln.Accept()
- if err != nil {
- return nil, fmt.Errorf("accept error: %v", err)
- }
- conns = append(conns, sc)
- ct := &clientTester{
- t: t,
- tr: tr,
- cc: cc,
- sc: sc,
- fr: NewFramer(sc, sc),
- }
- wg.Add(1)
- go func(count int) {
- defer wg.Done()
- server(count, ct)
- }(count)
- return cc, nil
- }
+func TestTransportPingWhenReadingPingDisabled(t *testing.T) {
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.ReadIdleTimeout = 0 // PINGs disabled
+ })
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
- client(tr)
- tr.CloseIdleConnections()
- ln.Close()
- for _, c := range conns {
- c.Close()
+ // No PING is sent, even after a long delay.
+ tc.advance(1 * time.Minute)
+ if f := tc.readFrame(); f != nil {
+ t.Fatalf("unexpected frame: %v", f)
}
- wg.Wait()
}
func TestTransportRetryAfterGOAWAY(t *testing.T) {
- client := func(tr *Transport) {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := tr.RoundTrip(req)
- if res != nil {
- res.Body.Close()
- if got := res.Header.Get("Foo"); got != "bar" {
- err = fmt.Errorf("foo header = %q; want bar", got)
- }
- }
- if err != nil {
- t.Errorf("RoundTrip: %v", err)
- }
- }
-
- server := func(count int, ct *clientTester) {
- switch count {
- case 1:
- ct.greet()
- hf, err := ct.firstHeaders()
- if err != nil {
- t.Errorf("server1 failed reading HEADERS: %v", err)
- return
- }
- t.Logf("server1 got %v", hf)
- if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil {
- t.Errorf("server1 failed writing GOAWAY: %v", err)
- return
- }
- case 2:
- ct.greet()
- hf, err := ct.firstHeaders()
- if err != nil {
- t.Errorf("server2 failed reading HEADERS: %v", err)
- return
- }
- t.Logf("server2 got %v", hf)
-
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"})
- err = ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- if err != nil {
- t.Errorf("server2 failed writing response HEADERS: %v", err)
- }
- default:
- t.Errorf("unexpected number of dials")
- return
- }
- }
+ tt := newTestTransport(t)
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tt.roundTrip(req)
+
+ // First attempt: Server sends a GOAWAY.
+ tc := tt.getConn()
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.writeGoAway(0 /*max id*/, ErrCodeNo, nil)
+ if rt.done() {
+ t.Fatalf("after GOAWAY, RoundTrip is done; want it to be retrying")
+ }
+
+ // Second attempt succeeds on a new connection.
+ tc = tt.getConn()
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
- testClientMultipleDials(t, client, server)
+ rt.wantStatus(200)
}
func TestTransportRetryAfterRefusedStream(t *testing.T) {
- clientDone := make(chan struct{})
- client := func(tr *Transport) {
- defer close(clientDone)
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- resp, err := tr.RoundTrip(req)
- if err != nil {
- t.Errorf("RoundTrip: %v", err)
- return
- }
- resp.Body.Close()
- if resp.StatusCode != 204 {
- t.Errorf("Status = %v; want 204", resp.StatusCode)
- return
- }
+ tt := newTestTransport(t)
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tt.roundTrip(req)
+
+ // First attempt: Server sends a RST_STREAM.
+ tc := tt.getConn()
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+ tc.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.wantFrameType(FrameSettings) // settings ACK
+ tc.writeRSTStream(1, ErrCodeRefusedStream)
+ if rt.done() {
+ t.Fatalf("after RST_STREAM, RoundTrip is done; want it to be retrying")
}
- server := func(_ int, ct *clientTester) {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- var count int
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- select {
- case <-clientDone:
- // If the client's done, it
- // will have reported any
- // errors on its side.
- default:
- t.Error(err)
- }
- return
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- if !f.HeadersEnded() {
- t.Errorf("headers should have END_HEADERS be ended: %v", f)
- return
- }
- count++
- if count == 1 {
- ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream)
- } else {
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- }
- default:
- t.Errorf("Unexpected client frame %v", f)
- return
- }
- }
- }
+ // Second attempt succeeds on the same connection.
+ tc.wantHeaders(wantHeader{
+ streamID: 3,
+ endStream: true,
+ })
+ tc.writeSettings()
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: 3,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "204",
+ ),
+ })
- testClientMultipleDials(t, client, server)
+ rt.wantStatus(204)
}
func TestTransportRetryHasLimit(t *testing.T) {
- // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s.
- if testing.Short() {
- t.Skip("skipping long test in short mode")
- }
- retryBackoffHook = func(d time.Duration) *time.Timer {
- return time.NewTimer(0) // fires immediately
- }
- defer func() {
- retryBackoffHook = nil
- }()
- clientDone := make(chan struct{})
- ct := newClientTester(t)
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- defer ct.cc.(*net.TCPConn).Close()
- }
- defer close(clientDone)
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- resp, err := ct.tr.RoundTrip(req)
- if err == nil {
- return fmt.Errorf("RoundTrip expected error, got response: %+v", resp)
+ tt := newTestTransport(t)
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tt.roundTrip(req)
+
+ // First attempt: Server sends a GOAWAY.
+ tc := tt.getConn()
+ tc.wantFrameType(FrameSettings)
+ tc.wantFrameType(FrameWindowUpdate)
+
+ var totalDelay time.Duration
+ count := 0
+ for streamID := uint32(1); ; streamID += 2 {
+ count++
+ tc.wantHeaders(wantHeader{
+ streamID: streamID,
+ endStream: true,
+ })
+ if streamID == 1 {
+ tc.writeSettings()
+ tc.wantFrameType(FrameSettings) // settings ACK
}
- t.Logf("expected error, got: %v", err)
- return nil
- }
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- select {
- case <-clientDone:
- // If the client's done, it
- // will have reported any
- // errors on its side.
- return nil
- default:
- return err
- }
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- if !f.HeadersEnded() {
- return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
- }
- ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream)
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
+ tc.writeRSTStream(streamID, ErrCodeRefusedStream)
+
+ d := tt.tr.syncHooks.timeUntilEvent()
+ if d == 0 {
+ if streamID == 1 {
+ continue
}
+ break
+ }
+ totalDelay += d
+ if totalDelay > 5*time.Minute {
+ t.Fatalf("RoundTrip still retrying after %v, should have given up", totalDelay)
}
+ tt.advance(d)
+ }
+	if got, want := count, 5; got < want {
+		t.Errorf("RoundTrip made %v attempts, want at least %v", got, want)
+	}
+ if rt.err() == nil {
+ t.Errorf("RoundTrip succeeded, want error")
}
- ct.run()
}
func TestTransportResponseDataBeforeHeaders(t *testing.T) {
- // This test use not valid response format.
- // Discarding logger output to not spam tests output.
- log.SetOutput(ioutil.Discard)
- defer log.SetOutput(os.Stderr)
+ // Discard log output complaining about protocol error.
+ log.SetOutput(io.Discard)
+ t.Cleanup(func() { log.SetOutput(os.Stderr) }) // after other cleanup is done
+
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ // First request is normal to ensure the check is per stream and not per connection.
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt1 := tc.roundTrip(req)
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt1.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt1.wantStatus(200)
- ct := newClientTester(t)
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- defer ct.cc.(*net.TCPConn).Close()
- }
- req := httptest.NewRequest("GET", "https://dummy.tld/", nil)
- // First request is normal to ensure the check is per stream and not per connection.
- _, err := ct.tr.RoundTrip(req)
- if err != nil {
- return fmt.Errorf("RoundTrip expected no error, got: %v", err)
- }
- // Second request returns a DATA frame with no HEADERS.
- resp, err := ct.tr.RoundTrip(req)
- if err == nil {
- return fmt.Errorf("RoundTrip expected error, got response: %+v", resp)
- }
- if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol {
- return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err)
- }
- return nil
+ // Second request returns a DATA frame with no HEADERS.
+ rt2 := tc.roundTrip(req)
+ tc.wantFrameType(FrameHeaders)
+ tc.writeData(rt2.streamID(), true, []byte("payload"))
+ if err, ok := rt2.err().(StreamError); !ok || err.Code != ErrCodeProtocol {
+ t.Fatalf("expected stream PROTOCOL_ERROR, got: %v", err)
}
- ct.server = func() error {
- ct.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err == io.EOF {
- return nil
- } else if err != nil {
- return err
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame, *RSTStreamFrame:
- case *HeadersFrame:
- switch f.StreamID {
- case 1:
- // Send a valid response to first request.
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- case 3:
- ct.fr.WriteData(f.StreamID, true, []byte("payload"))
- }
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
- }
- }
- }
- ct.run()
}
func TestTransportMaxFrameReadSize(t *testing.T) {
@@ -4053,30 +3312,17 @@ func TestTransportMaxFrameReadSize(t *testing.T) {
maxReadFrameSize: 1024,
want: minMaxFrameSize,
}} {
- ct := newClientTester(t)
- ct.tr.MaxReadFrameSize = test.maxReadFrameSize
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", http.NoBody)
- ct.tr.RoundTrip(req)
- return nil
- }
- ct.server = func() error {
- defer ct.cc.(*net.TCPConn).Close()
- ct.greet()
- var got uint32
- ct.settings.ForeachSetting(func(s Setting) error {
- switch s.ID {
- case SettingMaxFrameSize:
- got = s.Val
- }
- return nil
- })
- if got != test.want {
- t.Errorf("Transport.MaxReadFrameSize = %v; server got %v, want %v", test.maxReadFrameSize, got, test.want)
- }
- return nil
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.MaxReadFrameSize = test.maxReadFrameSize
+ })
+
+ fr := testClientConnReadFrame[*SettingsFrame](tc)
+ got, ok := fr.Value(SettingMaxFrameSize)
+ if !ok {
+ t.Errorf("Transport.MaxReadFrameSize = %v; server got no setting, want %v", test.maxReadFrameSize, test.want)
+ } else if got != test.want {
+ t.Errorf("Transport.MaxReadFrameSize = %v; server got %v, want %v", test.maxReadFrameSize, got, test.want)
}
- ct.run()
}
}
@@ -4129,324 +3375,113 @@ func TestTransportRequestsLowServerLimit(t *testing.T) {
func TestTransportRequestsStallAtServerLimit(t *testing.T) {
const maxConcurrent = 2
- greet := make(chan struct{}) // server sends initial SETTINGS frame
- gotRequest := make(chan struct{}) // server received a request
- clientDone := make(chan struct{})
- cancelClientRequest := make(chan struct{})
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.StrictMaxConcurrentStreams = true
+ })
+ tc.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent})
- // Collect errors from goroutines.
- var wg sync.WaitGroup
- errs := make(chan error, 100)
- defer func() {
- wg.Wait()
- close(errs)
- for err := range errs {
- t.Error(err)
- }
- }()
+ cancelClientRequest := make(chan struct{})
- // We will send maxConcurrent+2 requests. This checker goroutine waits for the
- // following stages:
- // 1. The first maxConcurrent requests are received by the server.
- // 2. The client will cancel the next request
- // 3. The server is unblocked so it can service the first maxConcurrent requests
- // 4. The client will send the final request
- wg.Add(1)
- unblockClient := make(chan struct{})
- clientRequestCancelled := make(chan struct{})
- unblockServer := make(chan struct{})
- go func() {
- defer wg.Done()
- // Stage 1.
- for k := 0; k < maxConcurrent; k++ {
- <-gotRequest
- }
- // Stage 2.
- close(unblockClient)
- <-clientRequestCancelled
- // Stage 3: give some time for the final RoundTrip call to be scheduled and
- // verify that the final request is not sent.
- time.Sleep(50 * time.Millisecond)
- select {
- case <-gotRequest:
- errs <- errors.New("last request did not stall")
- close(unblockServer)
- return
- default:
+ // Start maxConcurrent+2 requests.
+ // The server does not respond to any of them yet.
+ var rts []*testRoundTrip
+ for k := 0; k < maxConcurrent+2; k++ {
+ req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil)
+ if k == maxConcurrent {
+ req.Cancel = cancelClientRequest
+ }
+ rt := tc.roundTrip(req)
+ rts = append(rts, rt)
+
+ if k < maxConcurrent {
+ // We are under the stream limit, so the client sends the request.
+ tc.wantHeaders(wantHeader{
+ streamID: rt.streamID(),
+ endStream: true,
+ header: http.Header{
+ ":authority": []string{"dummy.tld"},
+ ":method": []string{"GET"},
+ ":path": []string{fmt.Sprintf("/%d", k)},
+ },
+ })
+ } else {
+ // We have reached the stream limit,
+ // so the client cannot send the request.
+ if fr := tc.readFrame(); fr != nil {
+ t.Fatalf("after making new request while at stream limit, got unexpected frame: %v", fr)
+ }
}
- close(unblockServer)
- // Stage 4.
- <-gotRequest
- }()
- ct := newClientTester(t)
- ct.tr.StrictMaxConcurrentStreams = true
- ct.client = func() error {
- var wg sync.WaitGroup
- defer func() {
- wg.Wait()
- close(clientDone)
- ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- ct.cc.(*net.TCPConn).Close()
- }
- }()
- for k := 0; k < maxConcurrent+2; k++ {
- wg.Add(1)
- go func(k int) {
- defer wg.Done()
- // Don't send the second request until after receiving SETTINGS from the server
- // to avoid a race where we use the default SettingMaxConcurrentStreams, which
- // is much larger than maxConcurrent. We have to send the first request before
- // waiting because the first request triggers the dial and greet.
- if k > 0 {
- <-greet
- }
- // Block until maxConcurrent requests are sent before sending any more.
- if k >= maxConcurrent {
- <-unblockClient
- }
- body := newStaticCloseChecker("")
- req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), body)
- if k == maxConcurrent {
- // This request will be canceled.
- req.Cancel = cancelClientRequest
- close(cancelClientRequest)
- _, err := ct.tr.RoundTrip(req)
- close(clientRequestCancelled)
- if err == nil {
- errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k)
- return
- }
- } else {
- resp, err := ct.tr.RoundTrip(req)
- if err != nil {
- errs <- fmt.Errorf("RoundTrip(%d): %v", k, err)
- return
- }
- ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if resp.StatusCode != 204 {
- errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode)
- return
- }
- }
- if err := body.isClosed(); err != nil {
- errs <- fmt.Errorf("RoundTrip(%d): %v", k, err)
- }
- }(k)
+ if rt.done() {
+ t.Fatalf("rt %v done", k)
}
- return nil
}
- ct.server = func() error {
- var wg sync.WaitGroup
- defer wg.Wait()
-
- ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent})
-
- // Server write loop.
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- writeResp := make(chan uint32, maxConcurrent+1)
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- <-unblockServer
- for id := range writeResp {
- buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: id,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- }
- }()
+ // Cancel the maxConcurrent'th request.
+ // The request should fail.
+ close(cancelClientRequest)
+ tc.sync()
+ if err := rts[maxConcurrent].err(); err == nil {
+ t.Fatalf("RoundTrip(%d) should have failed due to cancel, did not", maxConcurrent)
+ }
- // Server read loop.
- var nreq int
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- select {
- case <-clientDone:
- // If the client's done, it will have reported any errors on its side.
- return nil
- default:
- return err
- }
- }
- switch f := f.(type) {
- case *WindowUpdateFrame:
- case *SettingsFrame:
- // Wait for the client SETTINGS ack until ending the greet.
- close(greet)
- case *HeadersFrame:
- if !f.HeadersEnded() {
- return fmt.Errorf("headers should have END_HEADERS be ended: %v", f)
- }
- gotRequest <- struct{}{}
- nreq++
- writeResp <- f.StreamID
- if nreq == maxConcurrent+1 {
- close(writeResp)
- }
- case *DataFrame:
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
- }
+ // No requests should be complete, except for the canceled one.
+ for i, rt := range rts {
+ if i != maxConcurrent && rt.done() {
+ t.Fatalf("RoundTrip(%d) is done, but should not be", i)
}
}
- ct.run()
+ // Server responds to a request, unblocking the last one.
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rts[0].streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ tc.wantHeaders(wantHeader{
+ streamID: rts[maxConcurrent+1].streamID(),
+ endStream: true,
+ header: http.Header{
+ ":authority": []string{"dummy.tld"},
+ ":method": []string{"GET"},
+ ":path": []string{fmt.Sprintf("/%d", maxConcurrent+1)},
+ },
+ })
+ rts[0].wantStatus(200)
}
func TestTransportMaxDecoderHeaderTableSize(t *testing.T) {
- ct := newClientTester(t)
var reqSize, resSize uint32 = 8192, 16384
- ct.tr.MaxDecoderHeaderTableSize = reqSize
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- cc, err := ct.tr.NewClientConn(ct.cc)
- if err != nil {
- return err
- }
- _, err = cc.RoundTrip(req)
- if err != nil {
- return err
- }
- if got, want := cc.peerMaxHeaderTableSize, resSize; got != want {
- return fmt.Errorf("peerHeaderTableSize = %d, want %d", got, want)
- }
- return nil
- }
- ct.server = func() error {
- buf := make([]byte, len(ClientPreface))
- _, err := io.ReadFull(ct.sc, buf)
- if err != nil {
- return fmt.Errorf("reading client preface: %v", err)
- }
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- ct.t.Fatalf("wanted client settings frame; got %v", f)
- _ = sf // stash it away?
- }
- var found bool
- err = sf.ForeachSetting(func(s Setting) error {
- if s.ID == SettingHeaderTableSize {
- found = true
- if got, want := s.Val, reqSize; got != want {
- return fmt.Errorf("received SETTINGS_HEADER_TABLE_SIZE = %d, want %d", got, want)
- }
- }
- return nil
- })
- if err != nil {
- return err
- }
- if !found {
- return fmt.Errorf("missing SETTINGS_HEADER_TABLE_SIZE setting")
- }
- if err := ct.fr.WriteSettings(Setting{SettingHeaderTableSize, resSize}); err != nil {
- ct.t.Fatal(err)
- }
- if err := ct.fr.WriteSettingsAck(); err != nil {
- ct.t.Fatal(err)
- }
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.MaxDecoderHeaderTableSize = reqSize
+ })
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- return nil
- }
- }
+ fr := testClientConnReadFrame[*SettingsFrame](tc)
+ if v, ok := fr.Value(SettingHeaderTableSize); !ok {
+ t.Fatalf("missing SETTINGS_HEADER_TABLE_SIZE setting")
+ } else if v != reqSize {
+ t.Fatalf("received SETTINGS_HEADER_TABLE_SIZE = %d, want %d", v, reqSize)
}
- ct.run()
-}
-func TestTransportMaxEncoderHeaderTableSize(t *testing.T) {
- ct := newClientTester(t)
- var peerAdvertisedMaxHeaderTableSize uint32 = 16384
- ct.tr.MaxEncoderHeaderTableSize = 8192
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- cc, err := ct.tr.NewClientConn(ct.cc)
- if err != nil {
- return err
- }
- _, err = cc.RoundTrip(req)
- if err != nil {
- return err
- }
- if got, want := cc.henc.MaxDynamicTableSize(), ct.tr.MaxEncoderHeaderTableSize; got != want {
- return fmt.Errorf("henc.MaxDynamicTableSize() = %d, want %d", got, want)
- }
- return nil
+ tc.writeSettings(Setting{SettingHeaderTableSize, resSize})
+ if got, want := tc.cc.peerMaxHeaderTableSize, resSize; got != want {
+ t.Fatalf("peerHeaderTableSize = %d, want %d", got, want)
}
- ct.server = func() error {
- buf := make([]byte, len(ClientPreface))
- _, err := io.ReadFull(ct.sc, buf)
- if err != nil {
- return fmt.Errorf("reading client preface: %v", err)
- }
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- ct.t.Fatalf("wanted client settings frame; got %v", f)
- _ = sf // stash it away?
- }
- if err := ct.fr.WriteSettings(Setting{SettingHeaderTableSize, peerAdvertisedMaxHeaderTableSize}); err != nil {
- ct.t.Fatal(err)
- }
- if err := ct.fr.WriteSettingsAck(); err != nil {
- ct.t.Fatal(err)
- }
+}
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- return nil
- }
- }
+func TestTransportMaxEncoderHeaderTableSize(t *testing.T) {
+ var peerAdvertisedMaxHeaderTableSize uint32 = 16384
+ tc := newTestClientConn(t, func(tr *Transport) {
+ tr.MaxEncoderHeaderTableSize = 8192
+ })
+ tc.greet(Setting{SettingHeaderTableSize, peerAdvertisedMaxHeaderTableSize})
+
+ if got, want := tc.cc.henc.MaxDynamicTableSize(), tc.tr.MaxEncoderHeaderTableSize; got != want {
+ t.Fatalf("henc.MaxDynamicTableSize() = %d, want %d", got, want)
}
- ct.run()
}
func TestAuthorityAddr(t *testing.T) {
@@ -4456,11 +3491,14 @@ func TestAuthorityAddr(t *testing.T) {
}{
{"http", "foo.com", "foo.com:80"},
{"https", "foo.com", "foo.com:443"},
+ {"https", "foo.com:", "foo.com:443"},
{"https", "foo.com:1234", "foo.com:1234"},
{"https", "1.2.3.4:1234", "1.2.3.4:1234"},
{"https", "1.2.3.4", "1.2.3.4:443"},
+ {"https", "1.2.3.4:", "1.2.3.4:443"},
{"https", "[::1]:1234", "[::1]:1234"},
{"https", "[::1]", "[::1]:443"},
+ {"https", "[::1]:", "[::1]:443"},
}
for _, tt := range tests {
got := authorityAddr(tt.scheme, tt.authority)
@@ -4527,40 +3565,24 @@ func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) {
// Issue 18891: make sure Request.Body == NoBody means no DATA frame
// is ever sent, even if empty.
func TestTransportNoBodyMeansNoDATA(t *testing.T) {
- ct := newClientTester(t)
-
- unblockClient := make(chan bool)
-
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", http.NoBody)
- ct.tr.RoundTrip(req)
- <-unblockClient
- return nil
- }
- ct.server = func() error {
- defer close(unblockClient)
- defer ct.cc.(*net.TCPConn).Close()
- ct.greet()
-
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return fmt.Errorf("ReadFrame while waiting for Headers: %v", err)
- }
- switch f := f.(type) {
- default:
- return fmt.Errorf("Got %T; want HeadersFrame", f)
- case *WindowUpdateFrame, *SettingsFrame:
- continue
- case *HeadersFrame:
- if !f.StreamEnded() {
- return fmt.Errorf("got headers frame without END_STREAM")
- }
- return nil
- }
- }
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", http.NoBody)
+ rt := tc.roundTrip(req)
+
+ tc.wantHeaders(wantHeader{
+ streamID: rt.streamID(),
+ endStream: true, // END_STREAM should be set when body is http.NoBody
+ header: http.Header{
+ ":authority": []string{"dummy.tld"},
+ ":method": []string{"GET"},
+ ":path": []string{"/"},
+ },
+ })
+ if fr := tc.readFrame(); fr != nil {
+ t.Fatalf("unexpected frame after headers: %v", fr)
}
- ct.run()
}
func benchSimpleRoundTrip(b *testing.B, nReqHeaders, nResHeader int) {
@@ -4639,41 +3661,22 @@ func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) {
// Verify transport doesn't crash when receiving bogus response lacking a :status header.
// Issue 22880.
func TestTransportHandlesInvalidStatuslessResponse(t *testing.T) {
- ct := newClientTester(t)
- ct.client = func() error {
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- _, err := ct.tr.RoundTrip(req)
- const substr = "malformed response from server: missing status pseudo header"
- if !strings.Contains(fmt.Sprint(err), substr) {
- return fmt.Errorf("RoundTrip error = %v; want substring %q", err, substr)
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
-
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
- switch f := f.(type) {
- case *HeadersFrame:
- enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"}) // no :status header
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: false, // we'll send some DATA to try to crash the transport
- BlockFragment: buf.Bytes(),
- })
- ct.fr.WriteData(f.StreamID, true, []byte("payload"))
- return nil
- }
- }
- }
- ct.run()
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false, // we'll send some DATA to try to crash the transport
+ BlockFragment: tc.makeHeaderBlockFragment(
+ "content-type", "text/html", // no :status header
+ ),
+ })
+ tc.writeData(rt.streamID(), true, []byte("payload"))
}
func BenchmarkClientRequestHeaders(b *testing.B) {
@@ -5021,95 +4024,42 @@ func (r *errReader) Read(p []byte) (int, error) {
}
func testTransportBodyReadError(t *testing.T, body []byte) {
- if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
- // So far we've only seen this be flaky on Windows and Plan 9,
- // perhaps due to TCP behavior on shutdowns while
- // unread data is in flight. This test should be
- // fixed, but a skip is better than annoying people
- // for now.
- t.Skipf("skipping flaky test on %s; https://golang.org/issue/31260", runtime.GOOS)
- }
- clientDone := make(chan struct{})
- ct := newClientTester(t)
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- defer ct.cc.(*net.TCPConn).Close()
- }
- defer close(clientDone)
-
- checkNoStreams := func() error {
- cp, ok := ct.tr.connPool().(*clientConnPool)
- if !ok {
- return fmt.Errorf("conn pool is %T; want *clientConnPool", ct.tr.connPool())
- }
- cp.mu.Lock()
- defer cp.mu.Unlock()
- conns, ok := cp.conns["dummy.tld:443"]
- if !ok {
- return fmt.Errorf("missing connection")
- }
- if len(conns) != 1 {
- return fmt.Errorf("conn pool size: %v; expect 1", len(conns))
- }
- if activeStreams(conns[0]) != 0 {
- return fmt.Errorf("active streams count: %v; want 0", activeStreams(conns[0]))
- }
- return nil
- }
- bodyReadError := errors.New("body read error")
- body := &errReader{body, bodyReadError}
- req, err := http.NewRequest("PUT", "https://dummy.tld/", body)
- if err != nil {
- return err
- }
- _, err = ct.tr.RoundTrip(req)
- if err != bodyReadError {
- return fmt.Errorf("err = %v; want %v", err, bodyReadError)
- }
- if err = checkNoStreams(); err != nil {
- return err
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ bodyReadError := errors.New("body read error")
+ b := tc.newRequestBody()
+ b.Write(body)
+ b.closeWithError(bodyReadError)
+ req, _ := http.NewRequest("PUT", "https://dummy.tld/", b)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ var receivedBody []byte
+readFrames:
+ for {
+ switch f := tc.readFrame().(type) {
+ case *DataFrame:
+ receivedBody = append(receivedBody, f.Data()...)
+ case *RSTStreamFrame:
+ break readFrames
+ default:
+ t.Fatalf("unexpected frame: %v", f)
+ case nil:
+ t.Fatalf("transport is idle, want RST_STREAM")
}
- return nil
}
- ct.server = func() error {
- ct.greet()
- var receivedBody []byte
- var resetCount int
- for {
- f, err := ct.fr.ReadFrame()
- t.Logf("server: ReadFrame = %v, %v", f, err)
- if err != nil {
- select {
- case <-clientDone:
- // If the client's done, it
- // will have reported any
- // errors on its side.
- if bytes.Compare(receivedBody, body) != 0 {
- return fmt.Errorf("body: %q; expected %q", receivedBody, body)
- }
- if resetCount != 1 {
- return fmt.Errorf("stream reset count: %v; expected: 1", resetCount)
- }
- return nil
- default:
- return err
- }
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- case *DataFrame:
- receivedBody = append(receivedBody, f.Data()...)
- case *RSTStreamFrame:
- resetCount++
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
- }
- }
+ if !bytes.Equal(receivedBody, body) {
+ t.Fatalf("body: %q; expected %q", receivedBody, body)
+ }
+
+ if err := rt.err(); err != bodyReadError {
+ t.Fatalf("err = %v; want %v", err, bodyReadError)
+ }
+
+ if got := activeStreams(tc.cc); got != 0 {
+ t.Fatalf("active streams count: %v; want 0", got)
}
- ct.run()
}
func TestTransportBodyReadError_Immediately(t *testing.T) { testTransportBodyReadError(t, nil) }
@@ -5122,59 +4072,18 @@ func TestTransportBodyEagerEndStream(t *testing.T) {
const reqBody = "some request body"
const resBody = "some response body"
- ct := newClientTester(t)
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- if runtime.GOOS == "plan9" {
- // CloseWrite not supported on Plan 9; Issue 17906
- defer ct.cc.(*net.TCPConn).Close()
- }
- body := strings.NewReader(reqBody)
- req, err := http.NewRequest("PUT", "https://dummy.tld/", body)
- if err != nil {
- return err
- }
- _, err = ct.tr.RoundTrip(req)
- if err != nil {
- return err
- }
- return nil
- }
- ct.server = func() error {
- ct.greet()
+ tc := newTestClientConn(t)
+ tc.greet()
- for {
- f, err := ct.fr.ReadFrame()
- if err != nil {
- return err
- }
+ body := strings.NewReader(reqBody)
+ req, _ := http.NewRequest("PUT", "https://dummy.tld/", body)
+ tc.roundTrip(req)
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- case *DataFrame:
- if !f.StreamEnded() {
- ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream)
- return fmt.Errorf("data frame without END_STREAM %v", f)
- }
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.Header().StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: buf.Bytes(),
- })
- ct.fr.WriteData(f.StreamID, true, []byte(resBody))
- return nil
- case *RSTStreamFrame:
- default:
- return fmt.Errorf("Unexpected client frame %v", f)
- }
- }
+ tc.wantFrameType(FrameHeaders)
+ f := testClientConnReadFrame[*DataFrame](tc)
+ if !f.StreamEnded() {
+ t.Fatalf("data frame without END_STREAM %v", f)
}
- ct.run()
}
type chunkReader struct {
@@ -5823,155 +4732,80 @@ func TestTransportCloseRequestBody(t *testing.T) {
}
}
-// collectClientsConnPool is a ClientConnPool that wraps lower and
-// collects what calls were made on it.
-type collectClientsConnPool struct {
- lower ClientConnPool
-
- mu sync.Mutex
- getErrs int
- got []*ClientConn
-}
-
-func (p *collectClientsConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
- cc, err := p.lower.GetClientConn(req, addr)
- p.mu.Lock()
- defer p.mu.Unlock()
- if err != nil {
- p.getErrs++
- return nil, err
- }
- p.got = append(p.got, cc)
- return cc, nil
-}
-
-func (p *collectClientsConnPool) MarkDead(cc *ClientConn) {
- p.lower.MarkDead(cc)
-}
-
func TestTransportRetriesOnStreamProtocolError(t *testing.T) {
- ct := newClientTester(t)
- pool := &collectClientsConnPool{
- lower: &clientConnPool{t: ct.tr},
- }
- ct.tr.ConnPool = pool
+ // This test verifies that
+ // - receiving a protocol error on a connection does not interfere with
+ // other requests in flight on that connection;
+ // - the connection is not reused for further requests; and
+ // - the failed request is retried on a new connection.
+ tt := newTestTransport(t)
+
+ // Start two requests. The first is a long request
+ // that will finish after the second. The second one
+ // will result in the protocol error.
+
+ // Request #1: The long request.
+ req1, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt1 := tt.roundTrip(req1)
+ tc1 := tt.getConn()
+ tc1.wantFrameType(FrameSettings)
+ tc1.wantFrameType(FrameWindowUpdate)
+ tc1.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc1.writeSettings()
+ tc1.wantFrameType(FrameSettings) // settings ACK
+
+ // Request #2(a): The short request.
+ req2, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt2 := tt.roundTrip(req2)
+ tc1.wantHeaders(wantHeader{
+ streamID: 3,
+ endStream: true,
+ })
- gotProtoError := make(chan bool, 1)
- ct.tr.CountError = func(errType string) {
- if errType == "recv_rststream_PROTOCOL_ERROR" {
- select {
- case gotProtoError <- true:
- default:
- }
- }
+ // Request #2(a) fails with ErrCodeProtocol.
+ tc1.writeRSTStream(3, ErrCodeProtocol)
+ if rt1.done() {
+ t.Fatalf("After protocol error on RoundTrip #2, RoundTrip #1 is done; want still in progress")
}
- ct.client = func() error {
- // Start two requests. The first is a long request
- // that will finish after the second. The second one
- // will result in the protocol error. We check that
- // after the first one closes, the connection then
- // shuts down.
-
- // The long, outer request.
- req1, _ := http.NewRequest("GET", "https://dummy.tld/long", nil)
- res1, err := ct.tr.RoundTrip(req1)
- if err != nil {
- return err
- }
- if got, want := res1.Header.Get("Is-Long"), "1"; got != want {
- return fmt.Errorf("First response's Is-Long header = %q; want %q", got, want)
- }
-
- req, _ := http.NewRequest("POST", "https://dummy.tld/fails", nil)
- res, err := ct.tr.RoundTrip(req)
- const want = "only one dial allowed in test mode"
- if got := fmt.Sprint(err); got != want {
- t.Errorf("didn't dial again: got %#q; want %#q", got, want)
- }
- if res != nil {
- res.Body.Close()
- }
- select {
- case <-gotProtoError:
- default:
- t.Errorf("didn't get stream protocol error")
- }
-
- if n, err := res1.Body.Read(make([]byte, 10)); err != io.EOF || n != 0 {
- t.Errorf("unexpected body read %v, %v", n, err)
- }
-
- pool.mu.Lock()
- defer pool.mu.Unlock()
- if pool.getErrs != 1 {
- t.Errorf("pool get errors = %v; want 1", pool.getErrs)
- }
- if len(pool.got) == 2 {
- if pool.got[0] != pool.got[1] {
- t.Errorf("requests went on different connections")
- }
- cc := pool.got[0]
- cc.mu.Lock()
- if !cc.doNotReuse {
- t.Error("ClientConn not marked doNotReuse")
- }
- cc.mu.Unlock()
-
- select {
- case <-cc.readerDone:
- case <-time.After(5 * time.Second):
- t.Errorf("timeout waiting for reader to be done")
- }
- } else {
- t.Errorf("pool get success = %v; want 2", len(pool.got))
- }
- return nil
+ if rt2.done() {
+ t.Fatalf("After protocol error on RoundTrip #2, RoundTrip #2 is done; want still in progress")
}
- ct.server = func() error {
- ct.greet()
- var sentErr bool
- var numHeaders int
- var firstStreamID uint32
-
- var hbuf bytes.Buffer
- enc := hpack.NewEncoder(&hbuf)
- for {
- f, err := ct.fr.ReadFrame()
- if err == io.EOF {
- // Client hung up on us, as it should at the end.
- return nil
- }
- if err != nil {
- return nil
- }
- switch f := f.(type) {
- case *WindowUpdateFrame, *SettingsFrame:
- case *HeadersFrame:
- numHeaders++
- if numHeaders == 1 {
- firstStreamID = f.StreamID
- hbuf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- enc.WriteField(hpack.HeaderField{Name: "is-long", Value: "1"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: f.StreamID,
- EndHeaders: true,
- EndStream: false,
- BlockFragment: hbuf.Bytes(),
- })
- continue
- }
- if !sentErr {
- sentErr = true
- ct.fr.WriteRSTStream(f.StreamID, ErrCodeProtocol)
- ct.fr.WriteData(firstStreamID, true, nil)
- continue
- }
- }
- }
- }
- ct.run()
+ // Request #2(b): The short request is retried on a new connection.
+ tc2 := tt.getConn()
+ tc2.wantFrameType(FrameSettings)
+ tc2.wantFrameType(FrameWindowUpdate)
+ tc2.wantHeaders(wantHeader{
+ streamID: 1,
+ endStream: true,
+ })
+ tc2.writeSettings()
+ tc2.wantFrameType(FrameSettings) // settings ACK
+
+ // Request #2(b) succeeds.
+ tc2.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc1.makeHeaderBlockFragment(
+ ":status", "201",
+ ),
+ })
+ rt2.wantStatus(201)
+
+ // Request #1 succeeds.
+ tc1.writeHeaders(HeadersFrameParam{
+ StreamID: 1,
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc1.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
+ rt1.wantStatus(200)
}
func TestClientConnReservations(t *testing.T) {
@@ -5984,7 +4818,7 @@ func TestClientConnReservations(t *testing.T) {
tr := &Transport{TLSClientConfig: tlsConfigInsecure}
defer tr.CloseIdleConnections()
- cc, err := tr.newClientConn(st.cc, false)
+ cc, err := tr.newClientConn(st.cc, false, nil)
if err != nil {
t.Fatal(err)
}
@@ -6023,39 +4857,27 @@ func TestClientConnReservations(t *testing.T) {
}
func TestTransportTimeoutServerHangs(t *testing.T) {
- clientDone := make(chan struct{})
- ct := newClientTester(t)
- ct.client = func() error {
- defer ct.cc.(*net.TCPConn).CloseWrite()
- defer close(clientDone)
+ tc := newTestClientConn(t)
+ tc.greet()
- req, err := http.NewRequest("PUT", "https://dummy.tld/", nil)
- if err != nil {
- return err
- }
+ ctx, cancel := context.WithCancel(context.Background())
+ req, _ := http.NewRequestWithContext(ctx, "PUT", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
- ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
- defer cancel()
- req = req.WithContext(ctx)
- req.Header.Add("Big", strings.Repeat("a", 1<<20))
- _, err = ct.tr.RoundTrip(req)
- if err == nil {
- return errors.New("error should not be nil")
- }
- if ne, ok := err.(net.Error); !ok || !ne.Timeout() {
- return fmt.Errorf("error should be a net error timeout: %v", err)
- }
- return nil
+ tc.wantFrameType(FrameHeaders)
+ tc.advance(5 * time.Second)
+ if f := tc.readFrame(); f != nil {
+ t.Fatalf("unexpected frame: %v", f)
}
- ct.server = func() error {
- ct.greet()
- select {
- case <-time.After(5 * time.Second):
- case <-clientDone:
- }
- return nil
+ if rt.done() {
+ t.Fatalf("after 5 seconds with no response, RoundTrip unexpectedly returned")
+ }
+
+ cancel()
+ tc.sync()
+ if rt.err() != context.Canceled {
+ t.Fatalf("RoundTrip error: %v; want context.Canceled", rt.err())
}
- ct.run()
}
func TestTransportContentLengthWithoutBody(t *testing.T) {
@@ -6248,20 +5070,6 @@ func TestTransportClosesConnAfterGoAwayLastStream(t *testing.T) {
testTransportClosesConnAfterGoAway(t, 1)
}
-type closeOnceConn struct {
- net.Conn
- closed uint32
-}
-
-var errClosed = errors.New("Close of closed connection")
-
-func (c *closeOnceConn) Close() error {
- if atomic.CompareAndSwapUint32(&c.closed, 0, 1) {
- return c.Conn.Close()
- }
- return errClosed
-}
-
// testTransportClosesConnAfterGoAway verifies that the transport
// closes a connection after reading a GOAWAY from it.
//
@@ -6269,53 +5077,35 @@ func (c *closeOnceConn) Close() error {
// When 0, the transport (unsuccessfully) retries the request (stream 1);
// when 1, the transport reads the response after receiving the GOAWAY.
func testTransportClosesConnAfterGoAway(t *testing.T, lastStream uint32) {
- ct := newClientTester(t)
- ct.cc = &closeOnceConn{Conn: ct.cc}
-
- var wg sync.WaitGroup
- wg.Add(1)
- ct.client = func() error {
- defer wg.Done()
- req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
- res, err := ct.tr.RoundTrip(req)
- if err == nil {
- res.Body.Close()
- }
- if gotErr, wantErr := err != nil, lastStream == 0; gotErr != wantErr {
- t.Errorf("RoundTrip got error %v (want error: %v)", err, wantErr)
- }
- if err = ct.cc.Close(); err != errClosed {
- return fmt.Errorf("ct.cc.Close() = %v, want errClosed", err)
- }
- return nil
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeGoAway(lastStream, ErrCodeNo, nil)
+
+ if lastStream > 0 {
+ // Send a valid response to first request.
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: true,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "200",
+ ),
+ })
}
- ct.server = func() error {
- defer wg.Wait()
- ct.greet()
- hf, err := ct.firstHeaders()
- if err != nil {
- return fmt.Errorf("server failed reading HEADERS: %v", err)
- }
- if err := ct.fr.WriteGoAway(lastStream, ErrCodeNo, nil); err != nil {
- return fmt.Errorf("server failed writing GOAWAY: %v", err)
- }
- if lastStream > 0 {
- // Send a valid response to first request.
- var buf bytes.Buffer
- enc := hpack.NewEncoder(&buf)
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
- ct.fr.WriteHeaders(HeadersFrameParam{
- StreamID: hf.StreamID,
- EndHeaders: true,
- EndStream: true,
- BlockFragment: buf.Bytes(),
- })
- }
- return nil
+ tc.closeWrite(io.EOF)
+ err := rt.err()
+ if gotErr, wantErr := err != nil, lastStream == 0; gotErr != wantErr {
+ t.Errorf("RoundTrip got error %v (want error: %v)", err, wantErr)
+ }
+ if !tc.netConnClosed {
+ t.Errorf("ClientConn did not close its net.Conn, expected it to")
}
-
- ct.run()
}
type slowCloser struct {
@@ -6366,3 +5156,183 @@ func TestTransportSlowClose(t *testing.T) {
}
res.Body.Close()
}
+
+func TestTransportDialTLSContext(t *testing.T) {
+ blockCh := make(chan struct{})
+ serverTLSConfigFunc := func(ts *httptest.Server) {
+ ts.Config.TLSConfig = &tls.Config{
+ // Triggers the server to request the clients certificate
+ // during TLS handshake.
+ ClientAuth: tls.RequestClientCert,
+ }
+ }
+ ts := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {},
+ optOnlyServer,
+ serverTLSConfigFunc,
+ )
+ defer ts.Close()
+ tr := &Transport{
+ TLSClientConfig: &tls.Config{
+ GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ // Tests that the context provided to `req` is
+ // passed into this function.
+ close(blockCh)
+ <-cri.Context().Done()
+ return nil, cri.Context().Err()
+ },
+ InsecureSkipVerify: true,
+ },
+ }
+ defer tr.CloseIdleConnections()
+ req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ req = req.WithContext(ctx)
+ errCh := make(chan error)
+ go func() {
+ defer close(errCh)
+ res, err := tr.RoundTrip(req)
+ if err != nil {
+ errCh <- err
+ return
+ }
+ res.Body.Close()
+ }()
+ // Wait for GetClientCertificate handler to be called
+ <-blockCh
+ // Cancel the context
+ cancel()
+ // Expect the cancellation error here
+ err = <-errCh
+ if err == nil {
+ t.Fatal("cancelling context during client certificate fetch did not error as expected")
+ return
+ }
+ if !errors.Is(err, context.Canceled) {
+ t.Fatalf("unexpected error returned after cancellation: %v", err)
+ }
+}
+
+// TestDialRaceResumesDial tests that, given two concurrent requests
+// to the same address, when the first Dial is interrupted because
+// the first request's context is cancelled, the second request
+// resumes the dial automatically.
+func TestDialRaceResumesDial(t *testing.T) {
+ blockCh := make(chan struct{})
+ serverTLSConfigFunc := func(ts *httptest.Server) {
+ ts.Config.TLSConfig = &tls.Config{
+ // Triggers the server to request the clients certificate
+ // during TLS handshake.
+ ClientAuth: tls.RequestClientCert,
+ }
+ }
+ ts := newServerTester(t,
+ func(w http.ResponseWriter, r *http.Request) {},
+ optOnlyServer,
+ serverTLSConfigFunc,
+ )
+ defer ts.Close()
+ tr := &Transport{
+ TLSClientConfig: &tls.Config{
+ GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ select {
+ case <-blockCh:
+ // If we already errored, return without error.
+ return &tls.Certificate{}, nil
+ default:
+ }
+ close(blockCh)
+ <-cri.Context().Done()
+ return nil, cri.Context().Err()
+ },
+ InsecureSkipVerify: true,
+ },
+ }
+ defer tr.CloseIdleConnections()
+ req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Create two requests with independent cancellation.
+ ctx1, cancel1 := context.WithCancel(context.Background())
+ defer cancel1()
+ req1 := req.WithContext(ctx1)
+ ctx2, cancel2 := context.WithCancel(context.Background())
+ defer cancel2()
+ req2 := req.WithContext(ctx2)
+ errCh := make(chan error)
+ go func() {
+ res, err := tr.RoundTrip(req1)
+ if err != nil {
+ errCh <- err
+ return
+ }
+ res.Body.Close()
+ }()
+ successCh := make(chan struct{})
+ go func() {
+ // Don't start request until first request
+ // has initiated the handshake.
+ <-blockCh
+ res, err := tr.RoundTrip(req2)
+ if err != nil {
+ errCh <- err
+ return
+ }
+ res.Body.Close()
+ // Close successCh to indicate that the second request
+ // made it to the server successfully.
+ close(successCh)
+ }()
+ // Wait for GetClientCertificate handler to be called
+ <-blockCh
+ // Cancel the context first
+ cancel1()
+ // Expect the cancellation error here
+ err = <-errCh
+ if err == nil {
+ t.Fatal("cancelling context during client certificate fetch did not error as expected")
+ return
+ }
+ if !errors.Is(err, context.Canceled) {
+ t.Fatalf("unexpected error returned after cancellation: %v", err)
+ }
+ select {
+ case err := <-errCh:
+ t.Fatalf("unexpected second error: %v", err)
+ case <-successCh:
+ }
+}
+
+func TestTransportDataAfter1xxHeader(t *testing.T) {
+ // Discard logger output to avoid spamming stderr.
+ log.SetOutput(io.Discard)
+ defer log.SetOutput(os.Stderr)
+
+ // https://go.dev/issue/65927 - server sends a 1xx response, followed by a DATA frame.
+ tc := newTestClientConn(t)
+ tc.greet()
+
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil)
+ rt := tc.roundTrip(req)
+
+ tc.wantFrameType(FrameHeaders)
+ tc.writeHeaders(HeadersFrameParam{
+ StreamID: rt.streamID(),
+ EndHeaders: true,
+ EndStream: false,
+ BlockFragment: tc.makeHeaderBlockFragment(
+ ":status", "100",
+ ),
+ })
+ tc.writeData(rt.streamID(), true, []byte{0})
+ err := rt.err()
+ if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol {
+ t.Errorf("RoundTrip error: %v; want ErrCodeProtocol", err)
+ }
+ tc.wantFrameType(FrameRSTStream)
+}
diff --git a/icmp/helper_posix.go b/icmp/helper_posix.go
index 6c3ebfaed..f625483f0 100644
--- a/icmp/helper_posix.go
+++ b/icmp/helper_posix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
package icmp
diff --git a/icmp/listen_posix.go b/icmp/listen_posix.go
index 6aea80478..b7cb15b7d 100644
--- a/icmp/listen_posix.go
+++ b/icmp/listen_posix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
package icmp
diff --git a/icmp/listen_stub.go b/icmp/listen_stub.go
index 1acfb74b6..7b76be1cb 100644
--- a/icmp/listen_stub.go
+++ b/icmp/listen_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package icmp
diff --git a/idna/go118.go b/idna/go118.go
index c5c4338db..712f1ad83 100644
--- a/idna/go118.go
+++ b/idna/go118.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.18
-// +build go1.18
package idna
diff --git a/idna/idna10.0.0.go b/idna/idna10.0.0.go
index 64ccf85fe..7b3717884 100644
--- a/idna/idna10.0.0.go
+++ b/idna/idna10.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.10
-// +build go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
diff --git a/idna/idna9.0.0.go b/idna/idna9.0.0.go
index ee1698cef..cc6a892a4 100644
--- a/idna/idna9.0.0.go
+++ b/idna/idna9.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.10
-// +build !go1.10
// Package idna implements IDNA2008 using the compatibility processing
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
diff --git a/idna/pre_go118.go b/idna/pre_go118.go
index 3aaccab1c..40e74bb3d 100644
--- a/idna/pre_go118.go
+++ b/idna/pre_go118.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.18
-// +build !go1.18
package idna
diff --git a/idna/tables10.0.0.go b/idna/tables10.0.0.go
index d1d62ef45..c6c2bf10a 100644
--- a/idna/tables10.0.0.go
+++ b/idna/tables10.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.10 && !go1.13
-// +build go1.10,!go1.13
package idna
diff --git a/idna/tables11.0.0.go b/idna/tables11.0.0.go
index 167efba71..76789393c 100644
--- a/idna/tables11.0.0.go
+++ b/idna/tables11.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.13 && !go1.14
-// +build go1.13,!go1.14
package idna
diff --git a/idna/tables12.0.0.go b/idna/tables12.0.0.go
index ab40f7bcc..0600cd2ae 100644
--- a/idna/tables12.0.0.go
+++ b/idna/tables12.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.14 && !go1.16
-// +build go1.14,!go1.16
package idna
diff --git a/idna/tables13.0.0.go b/idna/tables13.0.0.go
index 66701eadf..2fb768ef6 100644
--- a/idna/tables13.0.0.go
+++ b/idna/tables13.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.16 && !go1.21
-// +build go1.16,!go1.21
package idna
diff --git a/idna/tables15.0.0.go b/idna/tables15.0.0.go
index 40033778f..5ff05fe1a 100644
--- a/idna/tables15.0.0.go
+++ b/idna/tables15.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build go1.21
-// +build go1.21
package idna
diff --git a/idna/tables9.0.0.go b/idna/tables9.0.0.go
index 4074b5332..0f25e84ca 100644
--- a/idna/tables9.0.0.go
+++ b/idna/tables9.0.0.go
@@ -1,7 +1,6 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
//go:build !go1.10
-// +build !go1.10
package idna
diff --git a/idna/trie12.0.0.go b/idna/trie12.0.0.go
index bb63f904b..8a75b9667 100644
--- a/idna/trie12.0.0.go
+++ b/idna/trie12.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.16
-// +build !go1.16
package idna
diff --git a/idna/trie13.0.0.go b/idna/trie13.0.0.go
index 7d68a8dc1..fa45bb907 100644
--- a/idna/trie13.0.0.go
+++ b/idna/trie13.0.0.go
@@ -5,7 +5,6 @@
// license that can be found in the LICENSE file.
//go:build go1.16
-// +build go1.16
package idna
diff --git a/internal/iana/gen.go b/internal/iana/gen.go
index 34f0f7eee..0fe65d899 100644
--- a/internal/iana/gen.go
+++ b/internal/iana/gen.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
//go:generate go run gen.go
diff --git a/internal/quic/cmd/interop/Dockerfile b/internal/quic/cmd/interop/Dockerfile
new file mode 100644
index 000000000..b60999a86
--- /dev/null
+++ b/internal/quic/cmd/interop/Dockerfile
@@ -0,0 +1,32 @@
+FROM martenseemann/quic-network-simulator-endpoint:latest AS builder
+
+ARG TARGETPLATFORM
+RUN echo "TARGETPLATFORM: ${TARGETPLATFORM}"
+
+RUN apt-get update && apt-get install -y wget tar git
+
+ENV GOVERSION=1.21.1
+
+RUN platform=$(echo ${TARGETPLATFORM} | tr '/' '-') && \
+ filename="go${GOVERSION}.${platform}.tar.gz" && \
+ wget --no-verbose https://dl.google.com/go/${filename} && \
+ tar xfz ${filename} && \
+ rm ${filename}
+
+ENV PATH="/go/bin:${PATH}"
+
+RUN git clone https://go.googlesource.com/net
+
+WORKDIR /net
+RUN go build -o /interop ./internal/quic/cmd/interop
+
+FROM martenseemann/quic-network-simulator-endpoint:latest
+
+WORKDIR /go-x-net
+
+COPY --from=builder /interop ./
+
+# copy run script and run it
+COPY run_endpoint.sh .
+RUN chmod +x run_endpoint.sh
+ENTRYPOINT [ "./run_endpoint.sh" ]
diff --git a/internal/quic/cmd/interop/README.md b/internal/quic/cmd/interop/README.md
new file mode 100644
index 000000000..aca0571b9
--- /dev/null
+++ b/internal/quic/cmd/interop/README.md
@@ -0,0 +1,7 @@
+This directory contains configuration and programs used to
+integrate with the QUIC Interop Test Runner.
+
+The QUIC Interop Test Runner executes a variety of test cases
+against a matrix of clients and servers.
+
+https://github.com/marten-seemann/quic-interop-runner
diff --git a/internal/quic/cmd/interop/main.go b/internal/quic/cmd/interop/main.go
new file mode 100644
index 000000000..5b652a2b1
--- /dev/null
+++ b/internal/quic/cmd/interop/main.go
@@ -0,0 +1,269 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+// The interop command is the client and server used by QUIC interoperability tests.
+//
+// https://github.com/marten-seemann/quic-interop-runner
+package main
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "log/slog"
+ "net"
+ "net/url"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "golang.org/x/net/quic"
+ "golang.org/x/net/quic/qlog"
+)
+
+var (
+ listen = flag.String("listen", "", "listen address")
+ cert = flag.String("cert", "", "certificate")
+ pkey = flag.String("key", "", "private key")
+ root = flag.String("root", "", "serve files from this root")
+ output = flag.String("output", "", "directory to write files to")
+ qlogdir = flag.String("qlog", "", "directory to write qlog output to")
+)
+
+func main() {
+ ctx := context.Background()
+ flag.Parse()
+ urls := flag.Args()
+
+ config := &quic.Config{
+ TLSConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ MinVersion: tls.VersionTLS13,
+ NextProtos: []string{"hq-interop"},
+ },
+ MaxBidiRemoteStreams: -1,
+ MaxUniRemoteStreams: -1,
+ QLogLogger: slog.New(qlog.NewJSONHandler(qlog.HandlerOptions{
+ Level: quic.QLogLevelFrame,
+ Dir: *qlogdir,
+ })),
+ }
+ if *cert != "" {
+ c, err := tls.LoadX509KeyPair(*cert, *pkey)
+ if err != nil {
+ log.Fatal(err)
+ }
+ config.TLSConfig.Certificates = []tls.Certificate{c}
+ }
+ if *root != "" {
+ config.MaxBidiRemoteStreams = 100
+ }
+ if keylog := os.Getenv("SSLKEYLOGFILE"); keylog != "" {
+ f, err := os.Create(keylog)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer f.Close()
+ config.TLSConfig.KeyLogWriter = f
+ }
+
+ testcase := os.Getenv("TESTCASE")
+ switch testcase {
+ case "handshake", "keyupdate":
+ basicTest(ctx, config, urls)
+ return
+ case "chacha20":
+ // "[...] offer only ChaCha20 as a ciphersuite."
+ //
+ // crypto/tls does not support configuring TLS 1.3 ciphersuites,
+ // so we can't support this test.
+ case "transfer":
+ // "The client should use small initial flow control windows
+ // for both stream- and connection-level flow control
+ // such that the during the transfer of files on the order of 1 MB
+ // the flow control window needs to be increased."
+ config.MaxStreamReadBufferSize = 64 << 10
+ config.MaxConnReadBufferSize = 64 << 10
+ basicTest(ctx, config, urls)
+ return
+ case "http3":
+ // TODO
+ case "multiconnect":
+ // TODO
+ case "resumption":
+ // TODO
+ case "retry":
+ // TODO
+ case "versionnegotiation":
+ // "The client should start a connection using
+ // an unsupported version number [...]"
+ //
+ // We don't support setting the client's version,
+ // so only run this test as a server.
+ if *listen != "" && len(urls) == 0 {
+ basicTest(ctx, config, urls)
+ return
+ }
+ case "v2":
+ // We do not support QUIC v2.
+ case "zerortt":
+ // TODO
+ }
+ fmt.Printf("unsupported test case %q\n", testcase)
+ os.Exit(127)
+}
+
+// basicTest runs the standard test setup.
+//
+// As a server, it serves the contents of the -root directory.
+// As a client, it downloads all the provided URLs in parallel,
+// making one connection to each destination server.
+func basicTest(ctx context.Context, config *quic.Config, urls []string) {
+ l, err := quic.Listen("udp", *listen, config)
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.Printf("listening on %v", l.LocalAddr())
+
+ byAuthority := map[string][]*url.URL{}
+ for _, s := range urls {
+ u, addr, err := parseURL(s)
+ if err != nil {
+ log.Fatal(err)
+ }
+ byAuthority[addr] = append(byAuthority[addr], u)
+ }
+ var g sync.WaitGroup
+ defer g.Wait()
+ for addr, u := range byAuthority {
+ addr, u := addr, u
+ g.Add(1)
+ go func() {
+ defer g.Done()
+ fetchFrom(ctx, config, l, addr, u)
+ }()
+ }
+
+ if config.MaxBidiRemoteStreams >= 0 {
+ serve(ctx, l)
+ }
+}
+
+func serve(ctx context.Context, l *quic.Endpoint) error {
+ for {
+ c, err := l.Accept(ctx)
+ if err != nil {
+ return err
+ }
+ go serveConn(ctx, c)
+ }
+}
+
+func serveConn(ctx context.Context, c *quic.Conn) {
+ for {
+ s, err := c.AcceptStream(ctx)
+ if err != nil {
+ return
+ }
+ go func() {
+ if err := serveReq(ctx, s); err != nil {
+ log.Print("serveReq:", err)
+ }
+ }()
+ }
+}
+
+func serveReq(ctx context.Context, s *quic.Stream) error {
+ defer s.Close()
+ req, err := io.ReadAll(s)
+ if err != nil {
+ return err
+ }
+ if !bytes.HasSuffix(req, []byte("\r\n")) {
+ return errors.New("invalid request")
+ }
+ req = bytes.TrimSuffix(req, []byte("\r\n"))
+ if !bytes.HasPrefix(req, []byte("GET /")) {
+ return errors.New("invalid request")
+ }
+ req = bytes.TrimPrefix(req, []byte("GET /"))
+ if !filepath.IsLocal(string(req)) {
+ return errors.New("invalid request")
+ }
+ f, err := os.Open(filepath.Join(*root, string(req)))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ _, err = io.Copy(s, f)
+ return err
+}
+
+func parseURL(s string) (u *url.URL, authority string, err error) {
+ u, err = url.Parse(s)
+ if err != nil {
+ return nil, "", err
+ }
+ host := u.Hostname()
+ port := u.Port()
+ if port == "" {
+ port = "443"
+ }
+ authority = net.JoinHostPort(host, port)
+ return u, authority, nil
+}
+
+func fetchFrom(ctx context.Context, config *quic.Config, l *quic.Endpoint, addr string, urls []*url.URL) {
+ conn, err := l.Dial(ctx, "udp", addr, config)
+ if err != nil {
+ log.Printf("%v: %v", addr, err)
+ return
+ }
+ log.Printf("connected to %v", addr)
+ defer conn.Close()
+ var g sync.WaitGroup
+ for _, u := range urls {
+ u := u
+ g.Add(1)
+ go func() {
+ defer g.Done()
+ if err := fetchOne(ctx, conn, u); err != nil {
+ log.Printf("fetch %v: %v", u, err)
+ } else {
+ log.Printf("fetched %v", u)
+ }
+ }()
+ }
+ g.Wait()
+}
+
+func fetchOne(ctx context.Context, conn *quic.Conn, u *url.URL) error {
+ if len(u.Path) == 0 || u.Path[0] != '/' || !filepath.IsLocal(u.Path[1:]) {
+ return errors.New("invalid path")
+ }
+ file, err := os.Create(filepath.Join(*output, u.Path[1:]))
+ if err != nil {
+ return err
+ }
+ s, err := conn.NewStream(ctx)
+ if err != nil {
+ return err
+ }
+ defer s.Close()
+ if _, err := s.Write([]byte("GET " + u.Path + "\r\n")); err != nil {
+ return err
+ }
+ s.CloseWrite()
+ if _, err := io.Copy(file, s); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/internal/quic/cmd/interop/main_test.go b/internal/quic/cmd/interop/main_test.go
new file mode 100644
index 000000000..4119740e6
--- /dev/null
+++ b/internal/quic/cmd/interop/main_test.go
@@ -0,0 +1,174 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+ "testing"
+)
+
+func init() {
+ // We reexec the test binary with CMD_INTEROP_MAIN=1 to run main.
+ if os.Getenv("CMD_INTEROP_MAIN") == "1" {
+ main()
+ os.Exit(0)
+ }
+}
+
+var (
+ tryExecOnce sync.Once
+ tryExecErr error
+)
+
+// needsExec skips the test if we can't use exec.Command.
+func needsExec(t *testing.T) {
+ tryExecOnce.Do(func() {
+ cmd := exec.Command(os.Args[0], "-test.list=^$")
+ cmd.Env = []string{}
+ tryExecErr = cmd.Run()
+ })
+ if tryExecErr != nil {
+ t.Skipf("skipping test: cannot exec subprocess: %v", tryExecErr)
+ }
+}
+
+type interopTest struct {
+ donec chan struct{}
+ addr string
+ cmd *exec.Cmd
+}
+
+func run(ctx context.Context, t *testing.T, name, testcase string, args []string) *interopTest {
+ needsExec(t)
+ ctx, cancel := context.WithCancel(ctx)
+ cmd := exec.CommandContext(ctx, os.Args[0], args...)
+ out, err := cmd.StderrPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cmd.Stdout = cmd.Stderr
+ cmd.Env = []string{
+ "CMD_INTEROP_MAIN=1",
+ "TESTCASE=" + testcase,
+ }
+ t.Logf("run %v: %v", name, args)
+ err = cmd.Start()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addrc := make(chan string, 1)
+ donec := make(chan struct{})
+ go func() {
+ defer close(addrc)
+ defer close(donec)
+ defer t.Logf("%v done", name)
+ s := bufio.NewScanner(out)
+ for s.Scan() {
+ line := s.Text()
+ t.Logf("%v: %v", name, line)
+ _, addr, ok := strings.Cut(line, "listening on ")
+ if ok {
+ select {
+ case addrc <- addr:
+ default:
+ }
+ }
+ }
+ }()
+
+ t.Cleanup(func() {
+ cancel()
+ <-donec
+ })
+
+ addr, ok := <-addrc
+ if !ok {
+ t.Fatal(cmd.Wait())
+ }
+ _, port, _ := net.SplitHostPort(addr)
+ addr = net.JoinHostPort("localhost", port)
+
+ iop := &interopTest{
+ cmd: cmd,
+ donec: donec,
+ addr: addr,
+ }
+ return iop
+}
+
+func (iop *interopTest) wait() {
+ <-iop.donec
+}
+
+func TestTransfer(t *testing.T) {
+ ctx := context.Background()
+ src := t.TempDir()
+ dst := t.TempDir()
+ certs := t.TempDir()
+ certFile := filepath.Join(certs, "cert.pem")
+ keyFile := filepath.Join(certs, "key.pem")
+ sourceName := "source"
+ content := []byte("hello, world\n")
+
+ os.WriteFile(certFile, localhostCert, 0600)
+ os.WriteFile(keyFile, localhostKey, 0600)
+ os.WriteFile(filepath.Join(src, sourceName), content, 0600)
+
+ srv := run(ctx, t, "server", "transfer", []string{
+ "-listen", "localhost:0",
+ "-cert", filepath.Join(certs, "cert.pem"),
+ "-key", filepath.Join(certs, "key.pem"),
+ "-root", src,
+ })
+ cli := run(ctx, t, "client", "transfer", []string{
+ "-output", dst, "https://" + srv.addr + "/" + sourceName,
+ })
+ cli.wait()
+
+ got, err := os.ReadFile(filepath.Join(dst, "source"))
+ if err != nil {
+ t.Fatalf("reading downloaded file: %v", err)
+ }
+ if !bytes.Equal(got, content) {
+ t.Fatalf("got downloaded file: %q, want %q", string(got), string(content))
+ }
+}
+
+// localhostCert is a PEM-encoded TLS cert with SAN IPs
+// "127.0.0.1" and "[::1]", expiring at Jan 29 16:00:00 2084 GMT.
+// generated from src/crypto/tls:
+// go run generate_cert.go --ecdsa-curve P256 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h
+var localhostCert = []byte(`-----BEGIN CERTIFICATE-----
+MIIBrDCCAVKgAwIBAgIPCvPhO+Hfv+NW76kWxULUMAoGCCqGSM49BAMCMBIxEDAO
+BgNVBAoTB0FjbWUgQ28wIBcNNzAwMTAxMDAwMDAwWhgPMjA4NDAxMjkxNjAwMDBa
+MBIxEDAOBgNVBAoTB0FjbWUgQ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARh
+WRF8p8X9scgW7JjqAwI9nYV8jtkdhqAXG9gyEgnaFNN5Ze9l3Tp1R9yCDBMNsGms
+PyfMPe5Jrha/LmjgR1G9o4GIMIGFMA4GA1UdDwEB/wQEAwIChDATBgNVHSUEDDAK
+BggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSOJri/wLQxq6oC
+Y6ZImms/STbTljAuBgNVHREEJzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAA
+AAAAAAAAAAAAATAKBggqhkjOPQQDAgNIADBFAiBUguxsW6TGhixBAdORmVNnkx40
+HjkKwncMSDbUaeL9jQIhAJwQ8zV9JpQvYpsiDuMmqCuW35XXil3cQ6Drz82c+fvE
+-----END CERTIFICATE-----`)
+
+// localhostKey is the private key for localhostCert.
+var localhostKey = []byte(testingKey(`-----BEGIN TESTING KEY-----
+MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgY1B1eL/Bbwf/MDcs
+rnvvWhFNr1aGmJJR59PdCN9lVVqhRANCAARhWRF8p8X9scgW7JjqAwI9nYV8jtkd
+hqAXG9gyEgnaFNN5Ze9l3Tp1R9yCDBMNsGmsPyfMPe5Jrha/LmjgR1G9
+-----END TESTING KEY-----`))
+
+// testingKey helps keep security scanners from getting excited about a private key in this file.
+func testingKey(s string) string { return strings.ReplaceAll(s, "TESTING KEY", "PRIVATE KEY") }
diff --git a/internal/quic/cmd/interop/run_endpoint.sh b/internal/quic/cmd/interop/run_endpoint.sh
new file mode 100644
index 000000000..442039bc0
--- /dev/null
+++ b/internal/quic/cmd/interop/run_endpoint.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Set up the routing needed for the simulation
+/setup.sh
+
+# The following variables are available for use:
+# - ROLE contains the role of this execution context, client or server
+# - SERVER_PARAMS contains user-supplied command line parameters
+# - CLIENT_PARAMS contains user-supplied command line parameters
+
+if [ "$ROLE" == "client" ]; then
+ # Wait for the simulator to start up.
+ /wait-for-it.sh sim:57832 -s -t 30
+ ./interop -output=/downloads -qlog=$QLOGDIR $CLIENT_PARAMS $REQUESTS
+elif [ "$ROLE" == "server" ]; then
+ ./interop -cert=/certs/cert.pem -key=/certs/priv.key -qlog=$QLOGDIR -listen=:443 -root=/www "$@" $SERVER_PARAMS
+fi
diff --git a/internal/quic/doc.go b/internal/quic/doc.go
deleted file mode 100644
index 2fe17fe22..000000000
--- a/internal/quic/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package quic is an experimental, incomplete implementation of the QUIC protocol.
-// This package is a work in progress, and is not ready for use at this time.
-//
-// This package implements (or will implement) RFC 9000, RFC 9001, and RFC 9002.
-package quic
diff --git a/internal/quic/packet_protection.go b/internal/quic/packet_protection.go
deleted file mode 100644
index 1f0a735e8..000000000
--- a/internal/quic/packet_protection.go
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-
-package quic
-
-import (
- "crypto"
- "crypto/aes"
- "crypto/cipher"
- "crypto/sha256"
- "crypto/tls"
- "errors"
- "fmt"
- "hash"
-
- "golang.org/x/crypto/chacha20"
- "golang.org/x/crypto/chacha20poly1305"
- "golang.org/x/crypto/cryptobyte"
- "golang.org/x/crypto/hkdf"
-)
-
-var errInvalidPacket = errors.New("quic: invalid packet")
-
-// keys holds the cryptographic material used to protect packets
-// at an encryption level and direction. (e.g., Initial client keys.)
-//
-// keys are not safe for concurrent use.
-type keys struct {
- // AEAD function used for packet protection.
- aead cipher.AEAD
-
- // The header_protection function as defined in:
- // https://www.rfc-editor.org/rfc/rfc9001#section-5.4.1
- //
- // This function takes a sample of the packet ciphertext
- // and returns a 5-byte mask which will be applied to the
- // protected portions of the packet header.
- headerProtection func(sample []byte) (mask [5]byte)
-
- // IV used to construct the AEAD nonce.
- iv []byte
-}
-
-// newKeys creates keys for a given cipher suite and secret.
-//
-// It returns an error if the suite is unknown.
-func newKeys(suite uint16, secret []byte) (keys, error) {
- switch suite {
- case tls.TLS_AES_128_GCM_SHA256:
- return newAESKeys(secret, crypto.SHA256, 128/8), nil
- case tls.TLS_AES_256_GCM_SHA384:
- return newAESKeys(secret, crypto.SHA384, 256/8), nil
- case tls.TLS_CHACHA20_POLY1305_SHA256:
- return newChaCha20Keys(secret), nil
- }
- return keys{}, fmt.Errorf("unknown cipher suite %x", suite)
-}
-
-func newAESKeys(secret []byte, h crypto.Hash, keyBytes int) keys {
- // https://www.rfc-editor.org/rfc/rfc9001#section-5.1
- key := hkdfExpandLabel(h.New, secret, "quic key", nil, keyBytes)
- c, err := aes.NewCipher(key)
- if err != nil {
- panic(err)
- }
- aead, err := cipher.NewGCM(c)
- if err != nil {
- panic(err)
- }
- iv := hkdfExpandLabel(h.New, secret, "quic iv", nil, aead.NonceSize())
- // https://www.rfc-editor.org/rfc/rfc9001#section-5.4.3
- hpKey := hkdfExpandLabel(h.New, secret, "quic hp", nil, keyBytes)
- hp, err := aes.NewCipher(hpKey)
- if err != nil {
- panic(err)
- }
- var scratch [aes.BlockSize]byte
- headerProtection := func(sample []byte) (mask [5]byte) {
- hp.Encrypt(scratch[:], sample)
- copy(mask[:], scratch[:])
- return mask
- }
- return keys{
- aead: aead,
- iv: iv,
- headerProtection: headerProtection,
- }
-}
-
-func newChaCha20Keys(secret []byte) keys {
- // https://www.rfc-editor.org/rfc/rfc9001#section-5.1
- key := hkdfExpandLabel(sha256.New, secret, "quic key", nil, chacha20poly1305.KeySize)
- aead, err := chacha20poly1305.New(key)
- if err != nil {
- panic(err)
- }
- iv := hkdfExpandLabel(sha256.New, secret, "quic iv", nil, aead.NonceSize())
- // https://www.rfc-editor.org/rfc/rfc9001#section-5.4.4
- hpKey := hkdfExpandLabel(sha256.New, secret, "quic hp", nil, chacha20.KeySize)
- headerProtection := func(sample []byte) [5]byte {
- counter := uint32(sample[3])<<24 | uint32(sample[2])<<16 | uint32(sample[1])<<8 | uint32(sample[0])
- nonce := sample[4:16]
- c, err := chacha20.NewUnauthenticatedCipher(hpKey, nonce)
- if err != nil {
- panic(err)
- }
- c.SetCounter(counter)
- var mask [5]byte
- c.XORKeyStream(mask[:], mask[:])
- return mask
- }
- return keys{
- aead: aead,
- iv: iv,
- headerProtection: headerProtection,
- }
-}
-
-// https://www.rfc-editor.org/rfc/rfc9001#section-5.2-2
-var initialSalt = []byte{0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a}
-
-// initialKeys returns the keys used to protect Initial packets.
-//
-// The Initial packet keys are derived from the Destination Connection ID
-// field in the client's first Initial packet.
-//
-// https://www.rfc-editor.org/rfc/rfc9001#section-5.2
-func initialKeys(cid []byte) (clientKeys, serverKeys keys) {
- initialSecret := hkdf.Extract(sha256.New, cid, initialSalt)
- clientInitialSecret := hkdfExpandLabel(sha256.New, initialSecret, "client in", nil, sha256.Size)
- clientKeys, err := newKeys(tls.TLS_AES_128_GCM_SHA256, clientInitialSecret)
- if err != nil {
- panic(err)
- }
-
- serverInitialSecret := hkdfExpandLabel(sha256.New, initialSecret, "server in", nil, sha256.Size)
- serverKeys, err = newKeys(tls.TLS_AES_128_GCM_SHA256, serverInitialSecret)
- if err != nil {
- panic(err)
- }
-
- return clientKeys, serverKeys
-}
-
-const headerProtectionSampleSize = 16
-
-// aeadOverhead is the difference in size between the AEAD output and input.
-// All cipher suites defined for use with QUIC have 16 bytes of overhead.
-const aeadOverhead = 16
-
-// xorIV xors the packet protection IV with the packet number.
-func (k keys) xorIV(pnum packetNumber) {
- k.iv[len(k.iv)-8] ^= uint8(pnum >> 56)
- k.iv[len(k.iv)-7] ^= uint8(pnum >> 48)
- k.iv[len(k.iv)-6] ^= uint8(pnum >> 40)
- k.iv[len(k.iv)-5] ^= uint8(pnum >> 32)
- k.iv[len(k.iv)-4] ^= uint8(pnum >> 24)
- k.iv[len(k.iv)-3] ^= uint8(pnum >> 16)
- k.iv[len(k.iv)-2] ^= uint8(pnum >> 8)
- k.iv[len(k.iv)-1] ^= uint8(pnum)
-}
-
-// initialized returns true if valid keys are available.
-func (k keys) initialized() bool {
- return k.aead != nil
-}
-
-// discard discards the keys (in the sense that we won't use them any more,
-// not that the keys are securely erased).
-//
-// https://www.rfc-editor.org/rfc/rfc9001.html#section-4.9
-func (k *keys) discard() {
- *k = keys{}
-}
-
-// protect applies packet protection to a packet.
-//
-// On input, hdr contains the packet header, pay the unencrypted payload,
-// pnumOff the offset of the packet number in the header, and pnum the untruncated
-// packet number.
-//
-// protect returns the result of appending the encrypted payload to hdr and
-// applying header protection.
-func (k keys) protect(hdr, pay []byte, pnumOff int, pnum packetNumber) []byte {
- k.xorIV(pnum)
- hdr = k.aead.Seal(hdr, k.iv, pay, hdr)
- k.xorIV(pnum)
-
- // Apply header protection.
- pnumSize := int(hdr[0]&0x03) + 1
- sample := hdr[pnumOff+4:][:headerProtectionSampleSize]
- mask := k.headerProtection(sample)
- if isLongHeader(hdr[0]) {
- hdr[0] ^= mask[0] & 0x0f
- } else {
- hdr[0] ^= mask[0] & 0x1f
- }
- for i := 0; i < pnumSize; i++ {
- hdr[pnumOff+i] ^= mask[1+i]
- }
-
- return hdr
-}
-
-// unprotect removes packet protection from a packet.
-//
-// On input, pkt contains the full protected packet, pnumOff the offset of
-// the packet number in the header, and pnumMax the largest packet number
-// seen in the number space of this packet.
-//
-// unprotect removes header protection from the header in pkt, and returns
-// the unprotected payload and packet number.
-func (k keys) unprotect(pkt []byte, pnumOff int, pnumMax packetNumber) (pay []byte, num packetNumber, err error) {
- if len(pkt) < pnumOff+4+headerProtectionSampleSize {
- return nil, 0, errInvalidPacket
- }
- numpay := pkt[pnumOff:]
- sample := numpay[4:][:headerProtectionSampleSize]
- mask := k.headerProtection(sample)
- if isLongHeader(pkt[0]) {
- pkt[0] ^= mask[0] & 0x0f
- } else {
- pkt[0] ^= mask[0] & 0x1f
- }
- pnumLen := int(pkt[0]&0x03) + 1
- pnum := packetNumber(0)
- for i := 0; i < pnumLen; i++ {
- numpay[i] ^= mask[1+i]
- pnum = (pnum << 8) | packetNumber(numpay[i])
- }
- pnum = decodePacketNumber(pnumMax, pnum, pnumLen)
-
- hdr := pkt[:pnumOff+pnumLen]
- pay = numpay[pnumLen:]
- k.xorIV(pnum)
- pay, err = k.aead.Open(pay[:0], k.iv, pay, hdr)
- k.xorIV(pnum)
- if err != nil {
- return nil, 0, err
- }
-
- return pay, pnum, nil
-}
-
-// hdkfExpandLabel implements HKDF-Expand-Label from RFC 8446, Section 7.1.
-//
-// Copied from crypto/tls/key_schedule.go.
-func hkdfExpandLabel(hash func() hash.Hash, secret []byte, label string, context []byte, length int) []byte {
- var hkdfLabel cryptobyte.Builder
- hkdfLabel.AddUint16(uint16(length))
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte("tls13 "))
- b.AddBytes([]byte(label))
- })
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(context)
- })
- out := make([]byte, length)
- n, err := hkdf.Expand(hash, secret, hkdfLabel.BytesOrPanic()).Read(out)
- if err != nil || n != length {
- panic("quic: HKDF-Expand-Label invocation failed unexpectedly")
- }
- return out
-}
diff --git a/internal/quic/packet_test.go b/internal/quic/packet_test.go
deleted file mode 100644
index b13a587e5..000000000
--- a/internal/quic/packet_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-
-package quic
-
-import (
- "bytes"
- "encoding/hex"
- "strings"
- "testing"
-)
-
-func TestPacketHeader(t *testing.T) {
- for _, test := range []struct {
- name string
- packet []byte
- isLongHeader bool
- packetType packetType
- dstConnID []byte
- }{{
- // Initial packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.1
- // (truncated)
- name: "rfc9001_a1",
- packet: unhex(`
- c000000001088394c8f03e5157080000 449e7b9aec34d1b1c98dd7689fb8ec11
- `),
- isLongHeader: true,
- packetType: packetTypeInitial,
- dstConnID: unhex(`8394c8f03e515708`),
- }, {
- // Initial packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.3
- // (truncated)
- name: "rfc9001_a3",
- packet: unhex(`
- cf000000010008f067a5502a4262b500 4075c0d95a482cd0991cd25b0aac406a
- `),
- isLongHeader: true,
- packetType: packetTypeInitial,
- dstConnID: []byte{},
- }, {
- // Retry packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.4
- name: "rfc9001_a4",
- packet: unhex(`
- ff000000010008f067a5502a4262b574 6f6b656e04a265ba2eff4d829058fb3f
- 0f2496ba
- `),
- isLongHeader: true,
- packetType: packetTypeRetry,
- dstConnID: []byte{},
- }, {
- // Short header packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.5
- name: "rfc9001_a5",
- packet: unhex(`
- 4cfe4189655e5cd55c41f69080575d7999c25a5bfb
- `),
- isLongHeader: false,
- packetType: packetType1RTT,
- dstConnID: unhex(`fe4189655e5cd55c`),
- }, {
- // Version Negotiation packet.
- name: "version_negotiation",
- packet: unhex(`
- 80 00000000 01ff0001020304
- `),
- isLongHeader: true,
- packetType: packetTypeVersionNegotiation,
- dstConnID: []byte{0xff},
- }, {
- // Too-short packet.
- name: "truncated_after_connid_length",
- packet: unhex(`
- cf0000000105
- `),
- isLongHeader: true,
- packetType: packetTypeInitial,
- dstConnID: nil,
- }, {
- // Too-short packet.
- name: "truncated_after_version",
- packet: unhex(`
- cf00000001
- `),
- isLongHeader: true,
- packetType: packetTypeInitial,
- dstConnID: nil,
- }, {
- // Much too short packet.
- name: "truncated_in_version",
- packet: unhex(`
- cf000000
- `),
- isLongHeader: true,
- packetType: packetTypeInvalid,
- dstConnID: nil,
- }} {
- t.Run(test.name, func(t *testing.T) {
- if got, want := isLongHeader(test.packet[0]), test.isLongHeader; got != want {
- t.Errorf("packet %x:\nisLongHeader(packet) = %v, want %v", test.packet, got, want)
- }
- if got, want := getPacketType(test.packet), test.packetType; got != want {
- t.Errorf("packet %x:\ngetPacketType(packet) = %v, want %v", test.packet, got, want)
- }
- gotConnID, gotOK := dstConnIDForDatagram(test.packet)
- wantConnID, wantOK := test.dstConnID, test.dstConnID != nil
- if !bytes.Equal(gotConnID, wantConnID) || gotOK != wantOK {
- t.Errorf("packet %x:\ndstConnIDForDatagram(packet) = {%x}, %v; want {%x}, %v", test.packet, gotConnID, gotOK, wantConnID, wantOK)
- }
- })
- }
-}
-
-func unhex(s string) []byte {
- b, err := hex.DecodeString(strings.Map(func(c rune) rune {
- switch c {
- case ' ', '\t', '\n':
- return -1
- }
- return c
- }, s))
- if err != nil {
- panic(err)
- }
- return b
-}
diff --git a/internal/socket/cmsghdr.go b/internal/socket/cmsghdr.go
index 4bdaaaf1a..33a5bf59c 100644
--- a/internal/socket/cmsghdr.go
+++ b/internal/socket/cmsghdr.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package socket
diff --git a/internal/socket/cmsghdr_bsd.go b/internal/socket/cmsghdr_bsd.go
index 0d30e0a0f..68f438c84 100644
--- a/internal/socket/cmsghdr_bsd.go
+++ b/internal/socket/cmsghdr_bsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd
-// +build aix darwin dragonfly freebsd netbsd openbsd
package socket
diff --git a/internal/socket/cmsghdr_linux_32bit.go b/internal/socket/cmsghdr_linux_32bit.go
index 4936e8a6f..058ea8de8 100644
--- a/internal/socket/cmsghdr_linux_32bit.go
+++ b/internal/socket/cmsghdr_linux_32bit.go
@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (arm || mips || mipsle || 386 || ppc) && linux
-// +build arm mips mipsle 386 ppc
-// +build linux
package socket
diff --git a/internal/socket/cmsghdr_linux_64bit.go b/internal/socket/cmsghdr_linux_64bit.go
index f6877f98f..3ca0d3a0a 100644
--- a/internal/socket/cmsghdr_linux_64bit.go
+++ b/internal/socket/cmsghdr_linux_64bit.go
@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux
-// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x
-// +build linux
package socket
diff --git a/internal/socket/cmsghdr_solaris_64bit.go b/internal/socket/cmsghdr_solaris_64bit.go
index d3dbe1b8e..6d0e426cd 100644
--- a/internal/socket/cmsghdr_solaris_64bit.go
+++ b/internal/socket/cmsghdr_solaris_64bit.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build amd64 && solaris
-// +build amd64,solaris
package socket
diff --git a/internal/socket/cmsghdr_stub.go b/internal/socket/cmsghdr_stub.go
index 1d9f2ed62..7ca9cb7e7 100644
--- a/internal/socket/cmsghdr_stub.go
+++ b/internal/socket/cmsghdr_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
package socket
diff --git a/internal/socket/cmsghdr_unix.go b/internal/socket/cmsghdr_unix.go
index 19d46789d..0211f225b 100644
--- a/internal/socket/cmsghdr_unix.go
+++ b/internal/socket/cmsghdr_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package socket
diff --git a/internal/socket/complete_dontwait.go b/internal/socket/complete_dontwait.go
index 5b1d50ae7..2038f2904 100644
--- a/internal/socket/complete_dontwait.go
+++ b/internal/socket/complete_dontwait.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package socket
diff --git a/internal/socket/complete_nodontwait.go b/internal/socket/complete_nodontwait.go
index be6340958..70e6f448b 100644
--- a/internal/socket/complete_nodontwait.go
+++ b/internal/socket/complete_nodontwait.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || windows || zos
-// +build aix windows zos
package socket
diff --git a/internal/socket/defs_aix.go b/internal/socket/defs_aix.go
index 0bc1703ca..2c847bbeb 100644
--- a/internal/socket/defs_aix.go
+++ b/internal/socket/defs_aix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/defs_darwin.go b/internal/socket/defs_darwin.go
index 0f07b5725..d94fff755 100644
--- a/internal/socket/defs_darwin.go
+++ b/internal/socket/defs_darwin.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/defs_dragonfly.go b/internal/socket/defs_dragonfly.go
index 0f07b5725..d94fff755 100644
--- a/internal/socket/defs_dragonfly.go
+++ b/internal/socket/defs_dragonfly.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/defs_freebsd.go b/internal/socket/defs_freebsd.go
index 0f07b5725..d94fff755 100644
--- a/internal/socket/defs_freebsd.go
+++ b/internal/socket/defs_freebsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/defs_linux.go b/internal/socket/defs_linux.go
index bbaafdf30..d0d52bdfb 100644
--- a/internal/socket/defs_linux.go
+++ b/internal/socket/defs_linux.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/defs_netbsd.go b/internal/socket/defs_netbsd.go
index 5b57b0c42..8db525bf4 100644
--- a/internal/socket/defs_netbsd.go
+++ b/internal/socket/defs_netbsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/defs_openbsd.go b/internal/socket/defs_openbsd.go
index 0f07b5725..d94fff755 100644
--- a/internal/socket/defs_openbsd.go
+++ b/internal/socket/defs_openbsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/defs_solaris.go b/internal/socket/defs_solaris.go
index 0f07b5725..d94fff755 100644
--- a/internal/socket/defs_solaris.go
+++ b/internal/socket/defs_solaris.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package socket
diff --git a/internal/socket/empty.s b/internal/socket/empty.s
index 90ab4ca3d..49d79791e 100644
--- a/internal/socket/empty.s
+++ b/internal/socket/empty.s
@@ -3,6 +3,5 @@
// license that can be found in the LICENSE file.
//go:build darwin && go1.12
-// +build darwin,go1.12
// This exists solely so we can linkname in symbols from syscall.
diff --git a/internal/socket/error_unix.go b/internal/socket/error_unix.go
index 78f412904..7a5cc5c43 100644
--- a/internal/socket/error_unix.go
+++ b/internal/socket/error_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package socket
diff --git a/internal/socket/iovec_32bit.go b/internal/socket/iovec_32bit.go
index 2b8fbb3f3..340e53fbd 100644
--- a/internal/socket/iovec_32bit.go
+++ b/internal/socket/iovec_32bit.go
@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (arm || mips || mipsle || 386 || ppc) && (darwin || dragonfly || freebsd || linux || netbsd || openbsd)
-// +build arm mips mipsle 386 ppc
-// +build darwin dragonfly freebsd linux netbsd openbsd
package socket
diff --git a/internal/socket/iovec_64bit.go b/internal/socket/iovec_64bit.go
index 2e94e96f8..26470c191 100644
--- a/internal/socket/iovec_64bit.go
+++ b/internal/socket/iovec_64bit.go
@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos)
-// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x
-// +build aix darwin dragonfly freebsd linux netbsd openbsd zos
package socket
diff --git a/internal/socket/iovec_solaris_64bit.go b/internal/socket/iovec_solaris_64bit.go
index f7da2bc4d..8859ce103 100644
--- a/internal/socket/iovec_solaris_64bit.go
+++ b/internal/socket/iovec_solaris_64bit.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build amd64 && solaris
-// +build amd64,solaris
package socket
diff --git a/internal/socket/iovec_stub.go b/internal/socket/iovec_stub.go
index 14caf5248..da886b032 100644
--- a/internal/socket/iovec_stub.go
+++ b/internal/socket/iovec_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
package socket
diff --git a/internal/socket/mmsghdr_stub.go b/internal/socket/mmsghdr_stub.go
index 113e773cd..4825b21e3 100644
--- a/internal/socket/mmsghdr_stub.go
+++ b/internal/socket/mmsghdr_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !linux && !netbsd
-// +build !aix,!linux,!netbsd
package socket
diff --git a/internal/socket/mmsghdr_unix.go b/internal/socket/mmsghdr_unix.go
index 41883c530..311fd2c78 100644
--- a/internal/socket/mmsghdr_unix.go
+++ b/internal/socket/mmsghdr_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || linux || netbsd
-// +build aix linux netbsd
package socket
diff --git a/internal/socket/msghdr_bsd.go b/internal/socket/msghdr_bsd.go
index 25f6847f9..ebff4f6e0 100644
--- a/internal/socket/msghdr_bsd.go
+++ b/internal/socket/msghdr_bsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd
-// +build aix darwin dragonfly freebsd netbsd openbsd
package socket
diff --git a/internal/socket/msghdr_bsdvar.go b/internal/socket/msghdr_bsdvar.go
index 5b8e00f1c..62e6fe861 100644
--- a/internal/socket/msghdr_bsdvar.go
+++ b/internal/socket/msghdr_bsdvar.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || netbsd
-// +build aix darwin dragonfly freebsd netbsd
package socket
diff --git a/internal/socket/msghdr_linux_32bit.go b/internal/socket/msghdr_linux_32bit.go
index b4658fbae..3dd07250a 100644
--- a/internal/socket/msghdr_linux_32bit.go
+++ b/internal/socket/msghdr_linux_32bit.go
@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (arm || mips || mipsle || 386 || ppc) && linux
-// +build arm mips mipsle 386 ppc
-// +build linux
package socket
diff --git a/internal/socket/msghdr_linux_64bit.go b/internal/socket/msghdr_linux_64bit.go
index 42411affa..5af9ddd6a 100644
--- a/internal/socket/msghdr_linux_64bit.go
+++ b/internal/socket/msghdr_linux_64bit.go
@@ -3,8 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (arm64 || amd64 || loong64 || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x) && linux
-// +build arm64 amd64 loong64 ppc64 ppc64le mips64 mips64le riscv64 s390x
-// +build linux
package socket
diff --git a/internal/socket/msghdr_solaris_64bit.go b/internal/socket/msghdr_solaris_64bit.go
index 3098f5d78..e212b50f8 100644
--- a/internal/socket/msghdr_solaris_64bit.go
+++ b/internal/socket/msghdr_solaris_64bit.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build amd64 && solaris
-// +build amd64,solaris
package socket
diff --git a/internal/socket/msghdr_stub.go b/internal/socket/msghdr_stub.go
index eb79151f6..e87677645 100644
--- a/internal/socket/msghdr_stub.go
+++ b/internal/socket/msghdr_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
package socket
diff --git a/internal/socket/msghdr_zos_s390x.go b/internal/socket/msghdr_zos_s390x.go
index 324e9ee7d..529db68ee 100644
--- a/internal/socket/msghdr_zos_s390x.go
+++ b/internal/socket/msghdr_zos_s390x.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build s390x && zos
-// +build s390x,zos
package socket
diff --git a/internal/socket/norace.go b/internal/socket/norace.go
index de0ad420f..8af30ecfb 100644
--- a/internal/socket/norace.go
+++ b/internal/socket/norace.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !race
-// +build !race
package socket
diff --git a/internal/socket/race.go b/internal/socket/race.go
index f0a28a625..9afa95808 100644
--- a/internal/socket/race.go
+++ b/internal/socket/race.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build race
-// +build race
package socket
diff --git a/internal/socket/rawconn_mmsg.go b/internal/socket/rawconn_mmsg.go
index 8f79b38f7..043139078 100644
--- a/internal/socket/rawconn_mmsg.go
+++ b/internal/socket/rawconn_mmsg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build linux
-// +build linux
package socket
diff --git a/internal/socket/rawconn_msg.go b/internal/socket/rawconn_msg.go
index f7d0b0d2b..7c0d7410b 100644
--- a/internal/socket/rawconn_msg.go
+++ b/internal/socket/rawconn_msg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
package socket
diff --git a/internal/socket/rawconn_nommsg.go b/internal/socket/rawconn_nommsg.go
index 02f328556..e363fb5a8 100644
--- a/internal/socket/rawconn_nommsg.go
+++ b/internal/socket/rawconn_nommsg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !linux
-// +build !linux
package socket
diff --git a/internal/socket/rawconn_nomsg.go b/internal/socket/rawconn_nomsg.go
index dd785877b..ff7a8baf0 100644
--- a/internal/socket/rawconn_nomsg.go
+++ b/internal/socket/rawconn_nomsg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package socket
diff --git a/internal/socket/socket_dontwait_test.go b/internal/socket/socket_dontwait_test.go
index 8eab9900b..1eb3580f6 100644
--- a/internal/socket/socket_dontwait_test.go
+++ b/internal/socket/socket_dontwait_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
package socket_test
diff --git a/internal/socket/socket_test.go b/internal/socket/socket_test.go
index 84907d8bc..faba10606 100644
--- a/internal/socket/socket_test.go
+++ b/internal/socket/socket_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
package socket_test
diff --git a/internal/socket/sys_bsd.go b/internal/socket/sys_bsd.go
index b258879d4..e7664d48b 100644
--- a/internal/socket/sys_bsd.go
+++ b/internal/socket/sys_bsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd openbsd solaris
package socket
diff --git a/internal/socket/sys_const_unix.go b/internal/socket/sys_const_unix.go
index 5d99f2373..d7627f87e 100644
--- a/internal/socket/sys_const_unix.go
+++ b/internal/socket/sys_const_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package socket
diff --git a/internal/socket/sys_linux.go b/internal/socket/sys_linux.go
index 76f5b8ae5..08d491077 100644
--- a/internal/socket/sys_linux.go
+++ b/internal/socket/sys_linux.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build linux && !s390x && !386
-// +build linux,!s390x,!386
package socket
diff --git a/internal/socket/sys_linux_loong64.go b/internal/socket/sys_linux_loong64.go
index af964e617..1d182470d 100644
--- a/internal/socket/sys_linux_loong64.go
+++ b/internal/socket/sys_linux_loong64.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build loong64
-// +build loong64
package socket
diff --git a/internal/socket/sys_linux_riscv64.go b/internal/socket/sys_linux_riscv64.go
index 5b128fbb2..0e407d125 100644
--- a/internal/socket/sys_linux_riscv64.go
+++ b/internal/socket/sys_linux_riscv64.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build riscv64
-// +build riscv64
package socket
diff --git a/internal/socket/sys_posix.go b/internal/socket/sys_posix.go
index 42b8f2340..58d865482 100644
--- a/internal/socket/sys_posix.go
+++ b/internal/socket/sys_posix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
package socket
diff --git a/internal/socket/sys_stub.go b/internal/socket/sys_stub.go
index 7cfb349c0..2e5b473c6 100644
--- a/internal/socket/sys_stub.go
+++ b/internal/socket/sys_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package socket
diff --git a/internal/socket/sys_unix.go b/internal/socket/sys_unix.go
index de823932b..93058db5b 100644
--- a/internal/socket/sys_unix.go
+++ b/internal/socket/sys_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package socket
diff --git a/internal/socket/zsys_aix_ppc64.go b/internal/socket/zsys_aix_ppc64.go
index 00691bd52..45bab004c 100644
--- a/internal/socket/zsys_aix_ppc64.go
+++ b/internal/socket/zsys_aix_ppc64.go
@@ -3,7 +3,6 @@
// Added for go1.11 compatibility
//go:build aix
-// +build aix
package socket
diff --git a/internal/socket/zsys_linux_loong64.go b/internal/socket/zsys_linux_loong64.go
index 6a94fec2c..b6fc15a1a 100644
--- a/internal/socket/zsys_linux_loong64.go
+++ b/internal/socket/zsys_linux_loong64.go
@@ -2,7 +2,6 @@
// cgo -godefs defs_linux.go
//go:build loong64
-// +build loong64
package socket
diff --git a/internal/socket/zsys_linux_riscv64.go b/internal/socket/zsys_linux_riscv64.go
index c066272dd..e67fc3cba 100644
--- a/internal/socket/zsys_linux_riscv64.go
+++ b/internal/socket/zsys_linux_riscv64.go
@@ -2,7 +2,6 @@
// cgo -godefs defs_linux.go
//go:build riscv64
-// +build riscv64
package socket
diff --git a/ipv4/control_bsd.go b/ipv4/control_bsd.go
index b7385dfd9..c88da8cbe 100644
--- a/ipv4/control_bsd.go
+++ b/ipv4/control_bsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd
-// +build aix darwin dragonfly freebsd netbsd openbsd
package ipv4
diff --git a/ipv4/control_pktinfo.go b/ipv4/control_pktinfo.go
index 0e748dbdc..14ae2dae4 100644
--- a/ipv4/control_pktinfo.go
+++ b/ipv4/control_pktinfo.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin || linux || solaris
-// +build darwin linux solaris
package ipv4
diff --git a/ipv4/control_stub.go b/ipv4/control_stub.go
index f27322c3e..3ba661160 100644
--- a/ipv4/control_stub.go
+++ b/ipv4/control_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv4
diff --git a/ipv4/control_unix.go b/ipv4/control_unix.go
index 2413e02f8..2e765548f 100644
--- a/ipv4/control_unix.go
+++ b/ipv4/control_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package ipv4
diff --git a/ipv4/defs_aix.go b/ipv4/defs_aix.go
index b70b61824..5e590a7df 100644
--- a/ipv4/defs_aix.go
+++ b/ipv4/defs_aix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/defs_darwin.go b/ipv4/defs_darwin.go
index 0ceadfce2..2494ff86a 100644
--- a/ipv4/defs_darwin.go
+++ b/ipv4/defs_darwin.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/defs_dragonfly.go b/ipv4/defs_dragonfly.go
index a84630c5c..43e9f67bb 100644
--- a/ipv4/defs_dragonfly.go
+++ b/ipv4/defs_dragonfly.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/defs_freebsd.go b/ipv4/defs_freebsd.go
index b068087a4..05899b3b4 100644
--- a/ipv4/defs_freebsd.go
+++ b/ipv4/defs_freebsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/defs_linux.go b/ipv4/defs_linux.go
index 7c8554d4b..fc869b019 100644
--- a/ipv4/defs_linux.go
+++ b/ipv4/defs_linux.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/defs_netbsd.go b/ipv4/defs_netbsd.go
index a84630c5c..43e9f67bb 100644
--- a/ipv4/defs_netbsd.go
+++ b/ipv4/defs_netbsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/defs_openbsd.go b/ipv4/defs_openbsd.go
index a84630c5c..43e9f67bb 100644
--- a/ipv4/defs_openbsd.go
+++ b/ipv4/defs_openbsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/defs_solaris.go b/ipv4/defs_solaris.go
index 0ceadfce2..2494ff86a 100644
--- a/ipv4/defs_solaris.go
+++ b/ipv4/defs_solaris.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
diff --git a/ipv4/errors_other_test.go b/ipv4/errors_other_test.go
index 615435391..93a7f9d74 100644
--- a/ipv4/errors_other_test.go
+++ b/ipv4/errors_other_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !(aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris)
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package ipv4_test
diff --git a/ipv4/errors_unix_test.go b/ipv4/errors_unix_test.go
index 566e070a5..7cff0097c 100644
--- a/ipv4/errors_unix_test.go
+++ b/ipv4/errors_unix_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package ipv4_test
diff --git a/ipv4/gen.go b/ipv4/gen.go
index e7b053a17..121c7643e 100644
--- a/ipv4/gen.go
+++ b/ipv4/gen.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
//go:generate go run gen.go
diff --git a/ipv4/helper_posix_test.go b/ipv4/helper_posix_test.go
index 4f6ecc0fd..ab8ffd90d 100644
--- a/ipv4/helper_posix_test.go
+++ b/ipv4/helper_posix_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
package ipv4_test
diff --git a/ipv4/helper_stub_test.go b/ipv4/helper_stub_test.go
index e47ddf7f3..791e6d4c0 100644
--- a/ipv4/helper_stub_test.go
+++ b/ipv4/helper_stub_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv4_test
diff --git a/ipv4/icmp_stub.go b/ipv4/icmp_stub.go
index cd4ee6e1c..c2c4ce7ff 100644
--- a/ipv4/icmp_stub.go
+++ b/ipv4/icmp_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !linux
-// +build !linux
package ipv4
diff --git a/ipv4/payload_cmsg.go b/ipv4/payload_cmsg.go
index 1bb370e25..91c685e8f 100644
--- a/ipv4/payload_cmsg.go
+++ b/ipv4/payload_cmsg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package ipv4
diff --git a/ipv4/payload_nocmsg.go b/ipv4/payload_nocmsg.go
index 53f0794eb..2afd4b50e 100644
--- a/ipv4/payload_nocmsg.go
+++ b/ipv4/payload_nocmsg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
package ipv4
diff --git a/ipv4/sockopt_posix.go b/ipv4/sockopt_posix.go
index eb07c1c02..82e2c3783 100644
--- a/ipv4/sockopt_posix.go
+++ b/ipv4/sockopt_posix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
package ipv4
diff --git a/ipv4/sockopt_stub.go b/ipv4/sockopt_stub.go
index cf036893b..840108bf7 100644
--- a/ipv4/sockopt_stub.go
+++ b/ipv4/sockopt_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv4
diff --git a/ipv4/sys_aix.go b/ipv4/sys_aix.go
index 02730cdfd..9244a68a3 100644
--- a/ipv4/sys_aix.go
+++ b/ipv4/sys_aix.go
@@ -4,7 +4,6 @@
// Added for go1.11 compatibility
//go:build aix
-// +build aix
package ipv4
diff --git a/ipv4/sys_asmreq.go b/ipv4/sys_asmreq.go
index 22322b387..645f254c6 100644
--- a/ipv4/sys_asmreq.go
+++ b/ipv4/sys_asmreq.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || windows
-// +build aix darwin dragonfly freebsd netbsd openbsd solaris windows
package ipv4
diff --git a/ipv4/sys_asmreq_stub.go b/ipv4/sys_asmreq_stub.go
index fde640142..48cfb6db2 100644
--- a/ipv4/sys_asmreq_stub.go
+++ b/ipv4/sys_asmreq_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !windows
-// +build !aix,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows
package ipv4
diff --git a/ipv4/sys_asmreqn.go b/ipv4/sys_asmreqn.go
index 54eb9901b..0b27b632f 100644
--- a/ipv4/sys_asmreqn.go
+++ b/ipv4/sys_asmreqn.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin || freebsd || linux
-// +build darwin freebsd linux
package ipv4
diff --git a/ipv4/sys_asmreqn_stub.go b/ipv4/sys_asmreqn_stub.go
index dcb15f25a..303a5e2e6 100644
--- a/ipv4/sys_asmreqn_stub.go
+++ b/ipv4/sys_asmreqn_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !darwin && !freebsd && !linux
-// +build !darwin,!freebsd,!linux
package ipv4
diff --git a/ipv4/sys_bpf.go b/ipv4/sys_bpf.go
index fb11e324e..1b4780df4 100644
--- a/ipv4/sys_bpf.go
+++ b/ipv4/sys_bpf.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build linux
-// +build linux
package ipv4
diff --git a/ipv4/sys_bpf_stub.go b/ipv4/sys_bpf_stub.go
index fc53a0d33..b1f779b49 100644
--- a/ipv4/sys_bpf_stub.go
+++ b/ipv4/sys_bpf_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !linux
-// +build !linux
package ipv4
diff --git a/ipv4/sys_bsd.go b/ipv4/sys_bsd.go
index e191b2f14..b7b032d26 100644
--- a/ipv4/sys_bsd.go
+++ b/ipv4/sys_bsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build netbsd || openbsd
-// +build netbsd openbsd
package ipv4
diff --git a/ipv4/sys_ssmreq.go b/ipv4/sys_ssmreq.go
index 6a4e7abf9..a295e15ea 100644
--- a/ipv4/sys_ssmreq.go
+++ b/ipv4/sys_ssmreq.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin || freebsd || linux || solaris
-// +build darwin freebsd linux solaris
package ipv4
diff --git a/ipv4/sys_ssmreq_stub.go b/ipv4/sys_ssmreq_stub.go
index 157159fd5..74bd454e2 100644
--- a/ipv4/sys_ssmreq_stub.go
+++ b/ipv4/sys_ssmreq_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !darwin && !freebsd && !linux && !solaris
-// +build !darwin,!freebsd,!linux,!solaris
package ipv4
diff --git a/ipv4/sys_stub.go b/ipv4/sys_stub.go
index d55085165..20af4074c 100644
--- a/ipv4/sys_stub.go
+++ b/ipv4/sys_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv4
diff --git a/ipv4/zsys_aix_ppc64.go b/ipv4/zsys_aix_ppc64.go
index b7f2d6e5c..dd454025c 100644
--- a/ipv4/zsys_aix_ppc64.go
+++ b/ipv4/zsys_aix_ppc64.go
@@ -3,7 +3,6 @@
// Added for go1.11 compatibility
//go:build aix
-// +build aix
package ipv4
diff --git a/ipv4/zsys_linux_loong64.go b/ipv4/zsys_linux_loong64.go
index e15c22c74..54f9e1394 100644
--- a/ipv4/zsys_linux_loong64.go
+++ b/ipv4/zsys_linux_loong64.go
@@ -2,7 +2,6 @@
// cgo -godefs defs_linux.go
//go:build loong64
-// +build loong64
package ipv4
diff --git a/ipv4/zsys_linux_riscv64.go b/ipv4/zsys_linux_riscv64.go
index e2edebdb8..78374a525 100644
--- a/ipv4/zsys_linux_riscv64.go
+++ b/ipv4/zsys_linux_riscv64.go
@@ -2,7 +2,6 @@
// cgo -godefs defs_linux.go
//go:build riscv64
-// +build riscv64
package ipv4
diff --git a/ipv6/control_rfc2292_unix.go b/ipv6/control_rfc2292_unix.go
index 2733ddbe2..a8f04e7b3 100644
--- a/ipv6/control_rfc2292_unix.go
+++ b/ipv6/control_rfc2292_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build darwin
-// +build darwin
package ipv6
diff --git a/ipv6/control_rfc3542_unix.go b/ipv6/control_rfc3542_unix.go
index 9c90844aa..51fbbb1f1 100644
--- a/ipv6/control_rfc3542_unix.go
+++ b/ipv6/control_rfc3542_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package ipv6
diff --git a/ipv6/control_stub.go b/ipv6/control_stub.go
index b7e8643fc..eb28ce753 100644
--- a/ipv6/control_stub.go
+++ b/ipv6/control_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv6
diff --git a/ipv6/control_unix.go b/ipv6/control_unix.go
index 63e475db8..9c73b8647 100644
--- a/ipv6/control_unix.go
+++ b/ipv6/control_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package ipv6
diff --git a/ipv6/defs_aix.go b/ipv6/defs_aix.go
index 97db07e8d..de171ce2c 100644
--- a/ipv6/defs_aix.go
+++ b/ipv6/defs_aix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/defs_darwin.go b/ipv6/defs_darwin.go
index 1d31e22c1..3b9e6ba64 100644
--- a/ipv6/defs_darwin.go
+++ b/ipv6/defs_darwin.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/defs_dragonfly.go b/ipv6/defs_dragonfly.go
index ddaed6597..b40d34b13 100644
--- a/ipv6/defs_dragonfly.go
+++ b/ipv6/defs_dragonfly.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/defs_freebsd.go b/ipv6/defs_freebsd.go
index 6f6bc6dbc..fe9a0f70f 100644
--- a/ipv6/defs_freebsd.go
+++ b/ipv6/defs_freebsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/defs_linux.go b/ipv6/defs_linux.go
index 0adcbd92d..b947c225a 100644
--- a/ipv6/defs_linux.go
+++ b/ipv6/defs_linux.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/defs_netbsd.go b/ipv6/defs_netbsd.go
index ddaed6597..b40d34b13 100644
--- a/ipv6/defs_netbsd.go
+++ b/ipv6/defs_netbsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/defs_openbsd.go b/ipv6/defs_openbsd.go
index ddaed6597..b40d34b13 100644
--- a/ipv6/defs_openbsd.go
+++ b/ipv6/defs_openbsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/defs_solaris.go b/ipv6/defs_solaris.go
index 03193da9b..7981a0452 100644
--- a/ipv6/defs_solaris.go
+++ b/ipv6/defs_solaris.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/ipv6/errors_other_test.go b/ipv6/errors_other_test.go
index 5a87d7361..5f6c0cb27 100644
--- a/ipv6/errors_other_test.go
+++ b/ipv6/errors_other_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !(aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris)
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package ipv6_test
diff --git a/ipv6/errors_unix_test.go b/ipv6/errors_unix_test.go
index 978ae61f8..9e8efd313 100644
--- a/ipv6/errors_unix_test.go
+++ b/ipv6/errors_unix_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package ipv6_test
diff --git a/ipv6/gen.go b/ipv6/gen.go
index bd53468eb..2973dff5c 100644
--- a/ipv6/gen.go
+++ b/ipv6/gen.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
//go:generate go run gen.go
diff --git a/ipv6/helper_posix_test.go b/ipv6/helper_posix_test.go
index 8ca6a3c3c..f412a78cb 100644
--- a/ipv6/helper_posix_test.go
+++ b/ipv6/helper_posix_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
package ipv6_test
diff --git a/ipv6/helper_stub_test.go b/ipv6/helper_stub_test.go
index 15e99fa94..9412a4cf5 100644
--- a/ipv6/helper_stub_test.go
+++ b/ipv6/helper_stub_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv6_test
diff --git a/ipv6/helper_unix_test.go b/ipv6/helper_unix_test.go
index 5ccff9d9b..c2459e320 100644
--- a/ipv6/helper_unix_test.go
+++ b/ipv6/helper_unix_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package ipv6_test
diff --git a/ipv6/icmp_bsd.go b/ipv6/icmp_bsd.go
index 120bf8775..2814534a0 100644
--- a/ipv6/icmp_bsd.go
+++ b/ipv6/icmp_bsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || netbsd || openbsd
-// +build aix darwin dragonfly freebsd netbsd openbsd
package ipv6
diff --git a/ipv6/icmp_stub.go b/ipv6/icmp_stub.go
index d60136a90..c92c9b51e 100644
--- a/ipv6/icmp_stub.go
+++ b/ipv6/icmp_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv6
diff --git a/ipv6/payload_cmsg.go b/ipv6/payload_cmsg.go
index b0692e430..be04e4d6a 100644
--- a/ipv6/payload_cmsg.go
+++ b/ipv6/payload_cmsg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package ipv6
diff --git a/ipv6/payload_nocmsg.go b/ipv6/payload_nocmsg.go
index cd0ff5083..29b9ccf69 100644
--- a/ipv6/payload_nocmsg.go
+++ b/ipv6/payload_nocmsg.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos
package ipv6
diff --git a/ipv6/sockopt_posix.go b/ipv6/sockopt_posix.go
index 37c628713..34dfed588 100644
--- a/ipv6/sockopt_posix.go
+++ b/ipv6/sockopt_posix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows zos
package ipv6
diff --git a/ipv6/sockopt_stub.go b/ipv6/sockopt_stub.go
index 32fd8664c..a09c3aaf2 100644
--- a/ipv6/sockopt_stub.go
+++ b/ipv6/sockopt_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv6
diff --git a/ipv6/sys_aix.go b/ipv6/sys_aix.go
index a47182afb..93c8efc46 100644
--- a/ipv6/sys_aix.go
+++ b/ipv6/sys_aix.go
@@ -4,7 +4,6 @@
// Added for go1.11 compatibility
//go:build aix
-// +build aix
package ipv6
diff --git a/ipv6/sys_asmreq.go b/ipv6/sys_asmreq.go
index 6ff9950d1..5c9cb4447 100644
--- a/ipv6/sys_asmreq.go
+++ b/ipv6/sys_asmreq.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris windows
package ipv6
diff --git a/ipv6/sys_asmreq_stub.go b/ipv6/sys_asmreq_stub.go
index 485290cb8..dc7049468 100644
--- a/ipv6/sys_asmreq_stub.go
+++ b/ipv6/sys_asmreq_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package ipv6
diff --git a/ipv6/sys_bpf.go b/ipv6/sys_bpf.go
index b5661fb8f..e39f75f49 100644
--- a/ipv6/sys_bpf.go
+++ b/ipv6/sys_bpf.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build linux
-// +build linux
package ipv6
diff --git a/ipv6/sys_bpf_stub.go b/ipv6/sys_bpf_stub.go
index cb0066187..8532a8f5d 100644
--- a/ipv6/sys_bpf_stub.go
+++ b/ipv6/sys_bpf_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !linux
-// +build !linux
package ipv6
diff --git a/ipv6/sys_bsd.go b/ipv6/sys_bsd.go
index bde41a6ce..9f3bc2afd 100644
--- a/ipv6/sys_bsd.go
+++ b/ipv6/sys_bsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build dragonfly || netbsd || openbsd
-// +build dragonfly netbsd openbsd
package ipv6
diff --git a/ipv6/sys_ssmreq.go b/ipv6/sys_ssmreq.go
index 023488a49..b40f5c685 100644
--- a/ipv6/sys_ssmreq.go
+++ b/ipv6/sys_ssmreq.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || freebsd || linux || solaris || zos
-// +build aix darwin freebsd linux solaris zos
package ipv6
diff --git a/ipv6/sys_ssmreq_stub.go b/ipv6/sys_ssmreq_stub.go
index acdf2e5cf..6526aad58 100644
--- a/ipv6/sys_ssmreq_stub.go
+++ b/ipv6/sys_ssmreq_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !freebsd && !linux && !solaris && !zos
-// +build !aix,!darwin,!freebsd,!linux,!solaris,!zos
package ipv6
diff --git a/ipv6/sys_stub.go b/ipv6/sys_stub.go
index 5807bba39..76602c34e 100644
--- a/ipv6/sys_stub.go
+++ b/ipv6/sys_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package ipv6
diff --git a/ipv6/zsys_aix_ppc64.go b/ipv6/zsys_aix_ppc64.go
index f604b0f3b..668716df4 100644
--- a/ipv6/zsys_aix_ppc64.go
+++ b/ipv6/zsys_aix_ppc64.go
@@ -3,7 +3,6 @@
// Added for go1.11 compatibility
//go:build aix
-// +build aix
package ipv6
diff --git a/ipv6/zsys_linux_loong64.go b/ipv6/zsys_linux_loong64.go
index 598fbfa06..6a53284db 100644
--- a/ipv6/zsys_linux_loong64.go
+++ b/ipv6/zsys_linux_loong64.go
@@ -2,7 +2,6 @@
// cgo -godefs defs_linux.go
//go:build loong64
-// +build loong64
package ipv6
diff --git a/ipv6/zsys_linux_riscv64.go b/ipv6/zsys_linux_riscv64.go
index d4f78e405..13b347205 100644
--- a/ipv6/zsys_linux_riscv64.go
+++ b/ipv6/zsys_linux_riscv64.go
@@ -2,7 +2,6 @@
// cgo -godefs defs_linux.go
//go:build riscv64
-// +build riscv64
package ipv6
diff --git a/lif/address.go b/lif/address.go
index 8eaddb508..0ed62a2c4 100644
--- a/lif/address.go
+++ b/lif/address.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
package lif
diff --git a/lif/address_test.go b/lif/address_test.go
index fdaa7f3aa..0e99b8d34 100644
--- a/lif/address_test.go
+++ b/lif/address_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
package lif
diff --git a/lif/binary.go b/lif/binary.go
index f31ca3ad0..8a6c45606 100644
--- a/lif/binary.go
+++ b/lif/binary.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
package lif
diff --git a/lif/defs_solaris.go b/lif/defs_solaris.go
index dbed7c86e..6bc8fa8e6 100644
--- a/lif/defs_solaris.go
+++ b/lif/defs_solaris.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
// +godefs map struct_in_addr [4]byte /* in_addr */
// +godefs map struct_in6_addr [16]byte /* in6_addr */
diff --git a/lif/lif.go b/lif/lif.go
index f1fce48b3..e9f2a9e0e 100644
--- a/lif/lif.go
+++ b/lif/lif.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
// Package lif provides basic functions for the manipulation of
// logical network interfaces and interface addresses on Solaris.
diff --git a/lif/link.go b/lif/link.go
index 00b78545b..d0c615a0b 100644
--- a/lif/link.go
+++ b/lif/link.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
package lif
diff --git a/lif/link_test.go b/lif/link_test.go
index 40b3f3ff2..fe56697f8 100644
--- a/lif/link_test.go
+++ b/lif/link_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
package lif
diff --git a/lif/sys.go b/lif/sys.go
index d0b532d9d..caba2fe90 100644
--- a/lif/sys.go
+++ b/lif/sys.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
package lif
diff --git a/lif/syscall.go b/lif/syscall.go
index 8d03b4aa9..329a65fe6 100644
--- a/lif/syscall.go
+++ b/lif/syscall.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build solaris
-// +build solaris
package lif
diff --git a/nettest/conntest_test.go b/nettest/conntest_test.go
index 7c5aeb9b3..c57e64004 100644
--- a/nettest/conntest_test.go
+++ b/nettest/conntest_test.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.8
-// +build go1.8
package nettest
diff --git a/nettest/nettest_stub.go b/nettest/nettest_stub.go
index 6e3a9312b..1725b6aa1 100644
--- a/nettest/nettest_stub.go
+++ b/nettest/nettest_stub.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package nettest
diff --git a/nettest/nettest_unix.go b/nettest/nettest_unix.go
index b1cb8b2f3..9ba269d02 100644
--- a/nettest/nettest_unix.go
+++ b/nettest/nettest_unix.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package nettest
diff --git a/publicsuffix/data/children b/publicsuffix/data/children
index 1038c561a..08261bffd 100644
Binary files a/publicsuffix/data/children and b/publicsuffix/data/children differ
diff --git a/publicsuffix/data/nodes b/publicsuffix/data/nodes
index 34751cd5b..1dae6ede8 100644
Binary files a/publicsuffix/data/nodes and b/publicsuffix/data/nodes differ
diff --git a/publicsuffix/data/text b/publicsuffix/data/text
index 124dcd61f..7e516413f 100644
--- a/publicsuffix/data/text
+++ b/publicsuffix/data/text
@@ -1 +1 @@
-billustrationionjukudoyamakeupowiathletajimageandsoundandvision-riopretobishimagentositecnologiabiocelotenkawabipanasonicatfoodnetworkinggroupperbirdartcenterprisecloudaccesscamdvrcampaniabirkenesoddtangenovarahkkeravjuegoshikikiraraholtalenishikatakazakindependent-revieweirbirthplaceu-1bitbucketrzynishikatsuragirlyuzawabitternidiscoverybjarkoybjerkreimdbaltimore-og-romsdalp1bjugnishikawazukamishihoronobeautydalwaysdatabaseballangenkainanaejrietisalatinabenogatabitorderblackfridaybloombergbauernishimerabloxcms3-website-us-west-2blushakotanishinomiyashironocparachutingjovikarateu-2bmoattachmentsalangenishinoomotegovtattoolforgerockartuzybmsalon-1bmwellbeingzoneu-3bnrwesteuropenairbusantiquesaltdalomzaporizhzhedmarkaratsuginamikatagamilanotairesistanceu-4bondigitaloceanspacesaludishangrilanciabonnishinoshimatsusakahoginankokubunjindianapolis-a-bloggerbookonlinewjerseyboomlahppiacenzachpomorskienishiokoppegardiskussionsbereichattanooganordkapparaglidinglassassinationalheritageu-north-1boschaefflerdalondonetskarelianceu-south-1bostik-serveronagasukevje-og-hornnesalvadordalibabalatinord-aurdalipaywhirlondrinaplesknsalzburgleezextraspace-to-rentalstomakomaibarabostonakijinsekikogentappssejnyaarparalleluxembourglitcheltenham-radio-opensocialorenskogliwicebotanicalgardeno-staginglobodoes-itcouldbeworldisrechtranakamurataiwanairforcechireadthedocsxeroxfinitybotanicgardenishitosashimizunaminamiawajikindianmarketinglogowestfalenishiwakindielddanuorrindigenamsskoganeindustriabotanyanagawallonieruchomoscienceandindustrynissandiegoddabouncemerckmsdnipropetrovskjervoyageorgeorgiabounty-fullensakerrypropertiesamegawaboutiquebecommerce-shopselectaxihuanissayokkaichintaifun-dnsaliasamnangerboutireservditchyouriparasiteboyfriendoftheinternetflixjavaldaostathellevangerbozen-sudtirolottokorozawabozen-suedtirolouvreisenissedalovepoparisor-fronisshingucciprianiigataipeidsvollovesickariyakumodumeloyalistoragebplaceducatorprojectcmembersampalermomahaccapooguybrandywinevalleybrasiliadboxosa
scoli-picenorddalpusercontentcp4bresciaokinawashirosatobamagazineuesamsclubartowestus2brindisibenikitagataikikuchikumagayagawalmartgorybristoloseyouriparliamentjeldsundivtasvuodnakaniikawatanagurabritishcolumbialowiezaganiyodogawabroadcastlebtimnetzlgloomy-routerbroadwaybroke-itvedestrandivttasvuotnakanojohanamakindlefrakkestadiybrokerbrothermesaverdeatnulmemergencyachtsamsungloppennebrowsersafetymarketsandnessjoenl-ams-1brumunddalublindesnesandoybrunelastxn--0trq7p7nnbrusselsandvikcoromantovalle-daostavangerbruxellesanfranciscofreakunekobayashikaoirmemorialucaniabryanskodjedugit-pagespeedmobilizeroticagliaricoharuovatlassian-dev-builderscbglugsjcbnpparibashkiriabrynewmexicoacharterbuzzwfarmerseinebwhalingmbhartiffany-2bzhitomirbzzcodyn-vpndnsantacruzsantafedjeffersoncoffeedbackdropocznordlandrudupontariobranconavstackasaokamikoaniikappudownloadurbanamexhibitioncogretakamatsukawacollectioncolognewyorkshirebungoonordre-landurhamburgrimstadynamisches-dnsantamariakecolonialwilliamsburgripeeweeklylotterycoloradoplateaudnedalncolumbusheycommunexus-3community-prochowicecomobaravendbambleborkapsicilyonagoyauthgear-stagingivestbyglandroverhallair-traffic-controlleyombomloabaths-heilbronnoysunddnslivegarsheiheijibigawaustraliaustinnfshostrolekamisatokaizukameyamatotakadaustevollivornowtv-infolldalolipopmcdircompanychipstmncomparemarkerryhotelsantoandrepbodynaliasnesoddenmarkhangelskjakdnepropetrovskiervaapsteigenflfannefrankfurtjxn--12cfi8ixb8lutskashibatakashimarshallstatebankashiharacomsecaaskimitsubatamibuildingriwatarailwaycondoshichinohealth-carereformemsettlersanukindustriesteamfamberlevagangaviikanonjinfinitigotembaixadaconferenceconstructionconsuladogadollsaobernardomniweatherchanneluxuryconsultanthropologyconsultingroks-thisayamanobeokakegawacontactkmaxxn--12co0c3b4evalled-aostamayukinsuregruhostingrondarcontagematsubaravennaharimalborkashiwaracontemporaryarteducationalchikugodonnakaiwamizawashtenawsmppl-wawdev-myqnapcloudcontrolledogawarabikomaezakirunoopschlesisch
esaogoncartoonartdecologiacontractorskenconventureshinodearthickashiwazakiyosatokamachilloutsystemscloudsitecookingchannelsdvrdnsdojogaszkolancashirecifedexetercoolblogdnsfor-better-thanawassamukawatarikuzentakatairavpagecooperativano-frankivskygearapparochernigovernmentksatxn--1ck2e1bananarepublic-inquiryggeebinatsukigatajimidsundevelopmentatarantours3-external-1copenhagencyclopedichiropracticatholicaxiashorokanaiecoproductionsaotomeinforumzcorporationcorsicahcesuoloanswatch-and-clockercorvettenrissagaeroclubmedecincinnativeamericanantiquest-le-patron-k3sapporomuracosenzamamidorittoeigersundynathomebuiltwithdarkasserverrankoshigayaltakasugaintelligencecosidnshome-webservercellikescandypoppdaluzerncostumedicallynxn--1ctwolominamatargets-itlon-2couchpotatofriesardegnarutomobegetmyiparsardiniacouncilvivanovoldacouponsarlcozoracq-acranbrookuwanalyticsarpsborgrongausdalcrankyowariasahikawatchandclockasukabeauxartsandcraftsarufutsunomiyawakasaikaitabashijonawatecrdyndns-at-homedepotaruinterhostsolutionsasayamatta-varjjatmpartinternationalfirearmsaseboknowsitallcreditcardyndns-at-workshoppingrossetouchigasakitahiroshimansionsaskatchewancreditunioncremonashgabadaddjaguarqcxn--1lqs03ncrewhmessinarashinomutashinaintuitoyosatoyokawacricketnedalcrimeast-kazakhstanangercrotonecrownipartsassarinuyamashinazawacrsaudacruisesauheradyndns-blogsitextilegnicapetownnews-stagingroundhandlingroznycuisinellancasterculturalcentertainmentoyotapartysvardocuneocupcakecuritibabymilk3curvallee-d-aosteinkjerusalempresashibetsurugashimaringatlantajirinvestmentsavannahgacutegirlfriendyndns-freeboxoslocalzonecymrulvikasumigaurawa-mazowszexnetlifyinzairtrafficplexus-1cyonabarumesswithdnsaveincloudyndns-homednsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriacyouthruherecipescaracaltanissettaishinomakilovecollegefantasyleaguernseyfembetsukumiyamazonawsglobalacceleratorahimeshimabaridagawatchesciencecentersciencehistoryfermockasuyamegurownproviderferraraferraris-a-catererferrerotikagoshi
malopolskanlandyndns-picsaxofetsundyndns-remotewdyndns-ipasadenaroyfgujoinvilleitungsenfhvalerfidontexistmein-iservschulegallocalhostrodawarafieldyndns-serverdalfigueresindevicenzaolkuszczytnoipirangalsaceofilateliafilegear-augustowhoswholdingsmall-webthingscientistordalfilegear-debianfilegear-gbizfilegear-iefilegear-jpmorganfilegear-sg-1filminamiechizenfinalfinancefineartscrapper-sitefinlandyndns-weblikes-piedmonticellocus-4finnoyfirebaseappaviancarrdyndns-wikinkobearalvahkijoetsuldalvdalaskanittedallasalleasecuritytacticschoenbrunnfirenetoystre-slidrettozawafirenzefirestonefirewebpaascrappingulenfirmdaleikangerfishingoldpoint2thisamitsukefitjarvodkafjordyndns-workangerfitnessettlementozsdellogliastradingunmanxn--1qqw23afjalerfldrvalleeaosteflekkefjordyndns1flesberguovdageaidnunjargaflickragerogerscrysecretrosnubar0flierneflirfloginlinefloppythonanywhereggio-calabriafloraflorencefloridatsunangojomedicinakamagayahabackplaneapplinzis-a-celticsfanfloripadoval-daostavalleyfloristanohatakahamalselvendrellflorokunohealthcareerscwienflowerservehalflifeinsurancefltrani-andria-barletta-trani-andriaflynnhosting-clusterfnchiryukyuragifuchungbukharanzanfndynnschokokekschokoladenfnwkaszubytemarkatowicefoolfor-ourfor-somedio-campidano-mediocampidanomediofor-theaterforexrothachijolsterforgotdnservehttpbin-butterforli-cesena-forlicesenaforlillesandefjordynservebbscholarshipschoolbusinessebyforsaleirfjordynuniversityforsandasuolodingenfortalfortefortmissoulangevagrigentomologyeonggiehtavuoatnagahamaroygardencowayfortworthachinoheavyfosneservehumourfotraniandriabarlettatraniandriafoxfordecampobassociatest-iserveblogsytemp-dnserveirchitachinakagawashingtondchernivtsiciliafozfr-par-1fr-par-2franamizuhobby-sitefrancaiseharafranziskanerimalvikatsushikabedzin-addrammenuorochesterfredrikstadtvserveminecraftranoyfreeddnsfreebox-oservemp3freedesktopfizerfreemasonryfreemyiphosteurovisionfreesitefreetlservep2pgfoggiafreiburgushikamifuranorfolkebibleksvikatsuyamarugame-hostyhostingxn--2m4a15ef
renchkisshikirkeneservepicservequakefreseniuscultureggio-emilia-romagnakasatsunairguardiannakadomarinebraskaunicommbankaufentigerfribourgfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganservesarcasmatartanddesignfrognfrolandynv6from-akrehamnfrom-alfrom-arfrom-azurewebsiteshikagamiishibukawakepnoorfrom-capitalonewportransipharmacienservicesevastopolefrom-coalfrom-ctranslatedynvpnpluscountryestateofdelawareclaimschoolsztynsettsupportoyotomiyazakis-a-candidatefrom-dchitosetodayfrom-dediboxafrom-flandersevenassisienarvikautokeinoticeablewismillerfrom-gaulardalfrom-hichisochikuzenfrom-iafrom-idyroyrvikingruenoharafrom-ilfrom-in-berlindasewiiheyaizuwakamatsubushikusakadogawafrom-ksharpharmacyshawaiijimarcheapartmentshellaspeziafrom-kyfrom-lanshimokawafrom-mamurogawatsonfrom-mdfrom-medizinhistorischeshimokitayamattelekommunikationfrom-mifunefrom-mnfrom-modalenfrom-mshimonitayanagit-reposts-and-telecommunicationshimonosekikawafrom-mtnfrom-nchofunatoriginstantcloudfrontdoorfrom-ndfrom-nefrom-nhktistoryfrom-njshimosuwalkis-a-chefarsundyndns-mailfrom-nminamifuranofrom-nvalleedaostefrom-nynysagamiharafrom-ohdattorelayfrom-oketogolffanshimotsukefrom-orfrom-padualstackazoologicalfrom-pratogurafrom-ris-a-conservativegashimotsumayfirstockholmestrandfrom-schmidtre-gauldalfrom-sdscloudfrom-tnfrom-txn--2scrj9chonanbunkyonanaoshimakanegasakikugawaltervistailscaleforcefrom-utsiracusaikirovogradoyfrom-vald-aostarostwodzislawildlifestylefrom-vtransportefrom-wafrom-wiardwebview-assetshinichinanfrom-wvanylvenneslaskerrylogisticshinjournalismartlabelingfrom-wyfrosinonefrostalowa-wolawafroyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairkitapps-auction-rancherkasydneyfujinomiyadattowebhoptogakushimotoganefujiokayamandalfujisatoshonairlinedre-eikerfujisawafujishiroishidakabiratoridedyn-berlincolnfujitsuruokazakiryuohkura
fujiyoshidavvenjargap-east-1fukayabeardubaiduckdnsncfdfukuchiyamadavvesiidappnodebalancertmgrazimutheworkpccwilliamhillfukudomigawafukuis-a-cpalacefukumitsubishigakisarazure-mobileirvikazteleportlligatransurlfukuokakamigaharafukuroishikarikaturindalfukusakishiwadazaifudaigokaseljordfukuyamagatakaharunusualpersonfunabashiriuchinadafunagatakahashimamakisofukushimangonnakatombetsumy-gatewayfunahashikamiamakusatsumasendaisenergyfundaciofunkfeuerfuoiskujukuriyamangyshlakasamatsudoomdnstracefuosskoczowinbar1furubirafurudonostiaafurukawajimaniwakuratefusodegaurafussaintlouis-a-anarchistoireggiocalabriafutabayamaguchinomihachimanagementrapaniizafutboldlygoingnowhere-for-morenakatsugawafuttsurutaharafuturecmshinjukumamotoyamashikefuturehostingfuturemailingfvghamurakamigoris-a-designerhandcraftedhandsonyhangglidinghangoutwentehannanmokuizumodenaklodzkochikuseihidorahannorthwesternmutualhanyuzenhapmircloudletshintokushimahappounzenharvestcelebrationhasamap-northeast-3hasaminami-alpshintomikasaharahashbangryhasudahasura-apphiladelphiaareadmyblogspotrdhasvikfh-muensterhatogayahoooshikamaishimofusartshinyoshitomiokamisunagawahatoyamazakitakatakanabeatshiojirishirifujiedahatsukaichikaiseiyoichimkentrendhostinghattfjelldalhayashimamotobusellfylkesbiblackbaudcdn-edgestackhero-networkisboringhazuminobushistoryhelplfinancialhelsinkitakyushuaiahembygdsforbundhemneshioyanaizuerichardlimanowarudahemsedalhepforgeblockshirahamatonbetsurgeonshalloffameiwamasoyheroyhetemlbfanhgtvaohigashiagatsumagoianiahigashichichibuskerudhigashihiroshimanehigashiizumozakitamigrationhigashikagawahigashikagurasoedahigashikawakitaaikitamotosunndalhigashikurumeeresinstaginghigashimatsushimarburghigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshirakokonoehigashinarusells-for-lesshiranukamitondabayashiogamagoriziahigashinehigashiomitamanortonsberghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitanakagusukumodernhigashitsunosegawahigashiurausukitashiobarahigashiya
matokoriyamanashifteditorxn--30rr7yhigashiyodogawahigashiyoshinogaris-a-doctorhippyhiraizumisatohnoshoohirakatashinagawahiranairportland-4-salernogiessennanjobojis-a-financialadvisor-aurdalhirarahiratsukaerusrcfastlylbanzaicloudappspotagerhirayaitakaokalmykiahistorichouseshiraois-a-geekhakassiahitachiomiyagildeskaliszhitachiotagonohejis-a-greenhitraeumtgeradegreehjartdalhjelmelandholeckodairaholidayholyhomegoodshiraokamitsuehomeiphilatelyhomelinkyard-cloudjiffyresdalhomelinuxn--32vp30hachiojiyahikobierzycehomeofficehomesecuritymacaparecidahomesecuritypchoseikarugamvikarlsoyhomesenseeringhomesklepphilipsynology-diskstationhomeunixn--3bst00minamiiserniahondahongooglecodebergentinghonjyoitakarazukaluganskharkivaporcloudhornindalhorsells-for-ustkanmakiwielunnerhortendofinternet-dnshiratakahagitapphoenixn--3ds443ghospitalhoteleshishikuis-a-guruhotelwithflightshisognehotmailhoyangerhoylandetakasagophonefosshisuifuettertdasnetzhumanitieshitaramahungryhurdalhurumajis-a-hard-workershizukuishimogosenhyllestadhyogoris-a-hunterhyugawarahyundaiwafuneis-into-carsiiitesilkharkovaresearchaeologicalvinklein-the-bandairtelebitbridgestoneenebakkeshibechambagricultureadymadealstahaugesunderseaportsinfolionetworkdalaheadjudygarlandis-into-cartoonsimple-urlis-into-gamesserlillyis-leetrentin-suedtirolis-lostre-toteneis-a-lawyeris-not-certifiedis-savedis-slickhersonis-uberleetrentino-a-adigeis-very-badajozis-a-liberalis-very-evillageis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandovre-eikerisleofmanaustdaljellybeanjenv-arubahccavuotnagaragusabaerobaticketsirdaljeonnamerikawauejetztrentino-aadigejevnakershusdecorativeartslupskhmelnytskyivarggatrentino-alto-adigejewelryjewishartgalleryjfkhplaystation-cloudyclusterjgorajlljls-sto1jls-sto2jls-sto3jmphotographysiojnjaworznospamproxyjoyentrentino-altoadigejoyokaichibajddarchitecturealtorlandjpnjprslzjurkotohiradomainstitutekotourakouhokutamamurakounosupabasembokukizunokunimilitarykouyamarylhurstjordalshalsenkouzushimasfjordenko
zagawakozakis-a-llamarnardalkozowindowskrakowinnersnoasakatakkokamiminersokndalkpnkppspbarcelonagawakkanaibetsubamericanfamilyds3-fips-us-gov-west-1krasnikahokutokashikis-a-musiciankrasnodarkredstonekrelliankristiansandcatsolarssonkristiansundkrodsheradkrokstadelvalle-aostatic-accessolognekryminamiizukaminokawanishiaizubangekumanotteroykumatorinovecoregontrailroadkumejimashikis-a-nascarfankumenantokonamegatakatoris-a-nursells-itrentin-sud-tirolkunisakis-a-painteractivelvetrentin-sudtirolkunitachiaraindropilotsolundbecknx-serversellsyourhomeftphxn--3e0b707ekunitomigusukuleuvenetokigawakunneppuboliviajessheimpertrixcdn77-secureggioemiliaromagnamsosnowiechristiansburgminakamichiharakunstsammlungkunstunddesignkuokgroupimientaketomisatoolsomakurehabmerkurgankurobeeldengeluidkurogimimatakatsukis-a-patsfankuroisoftwarezzoologykuromatsunais-a-personaltrainerkuronkurotakikawasakis-a-photographerokussldkushirogawakustanais-a-playershiftcryptonomichigangwonkusupersalezajskomakiyosemitekutchanelkutnowruzhgorodeokuzumakis-a-republicanonoichinomiyakekvafjordkvalsundkvamscompute-1kvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsomnatalkzmisakis-a-soxfanmisasaguris-a-studentalmisawamisconfusedmishimasudamissilemisugitokuyamatsumaebashikshacknetrentino-sued-tirolmitakeharamitourismilemitoyoakemiuramiyazurecontainerdpolicemiyotamatsukuris-a-teacherkassyno-dshowamjondalenmonstermontrealestatefarmequipmentrentino-suedtirolmonza-brianzapposor-odalmonza-e-della-brianzaptokyotangotpantheonsitemonzabrianzaramonzaebrianzamonzaedellabrianzamoonscalebookinghostedpictetrentinoa-adigemordoviamoriyamatsumotofukemoriyoshiminamiashigaramormonmouthachirogatakamoriokakudamatsuemoroyamatsunomortgagemoscowiosor-varangermoseushimodatemosjoenmoskenesorfoldmossorocabalena-devicesorreisahayakawakamiichikawamisatottoris-a-techietis-a-landscaperspectakasakitchenmosvikomatsushimarylandmoteginowaniihamatamakinoharamoviemovimientolgamozilla-iotrentinoaadigemtranbytomaritimekeepingmuginozawaonsensiosite
muikaminoyamaxunispacemukoebenhavnmulhouseoullensvanguardmunakatanemuncienciamuosattemupinbarclaycards3-sa-east-1murmanskomforbar2murotorcraftrentinoalto-adigemusashinoharamuseetrentinoaltoadigemuseumverenigingmusicargodaddyn-o-saurlandesortlandmutsuzawamy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoruminamimakis-a-rockstarachowicemydattolocalcertificationmyddnsgeekgalaxymydissentrentinos-tirolmydobissmarterthanyoumydrobofageologymydsoundcastronomy-vigorlicemyeffectrentinostirolmyfastly-terrariuminamiminowamyfirewalledreplittlestargardmyforuminamioguni5myfritzmyftpaccessouthcarolinaturalhistorymuseumcentermyhome-servermyjinomykolaivencloud66mymailermymediapchristmasakillucernemyokohamamatsudamypepinkommunalforbundmypetsouthwest1-uslivinghistorymyphotoshibalashovhadanorth-kazakhstanmypicturestaurantrentinosud-tirolmypsxn--3pxu8kommunemysecuritycamerakermyshopblocksowamyshopifymyspreadshopwarendalenugmythic-beastspectruminamisanrikubetsuppliesoomytis-a-bookkeepermaritimodspeedpartnermytuleap-partnersphinxn--41amyvnchromediatechnologymywirepaircraftingvollohmusashimurayamashikokuchuoplantationplantspjelkavikomorotsukagawaplatformsharis-a-therapistoiaplatter-appinokofuefukihaboromskogplatterpioneerplazaplcube-serversicherungplumbingoplurinacionalpodhalepodlasiellaktyubinskiptveterinairealmpmnpodzonepohlpoivronpokerpokrovskomvuxn--3hcrj9choyodobashichikashukujitawaraumalatvuopmicrosoftbankarmoypoliticarrierpolitiendapolkowicepoltavalle-d-aostaticspydebergpomorzeszowitdkongsbergponpesaro-urbino-pesarourbinopesaromasvuotnarusawapordenonepornporsangerporsangugeporsgrunnanyokoshibahikariwanumatakinouepoznanpraxis-a-bruinsfanprdpreservationpresidioprgmrprimetelemarkongsvingerprincipeprivatizehealthinsuranceprofesionalprogressivestfoldpromombetsupplypropertyprotectionprotonetrentinosued-tirolprudentialpruszkowithgoogleapiszprvcyberprzeworskogpulawypunyufuelveruminamiuonumassa-carrara-massacarraramassabuyshousesopotrentino-sud-tirolpupugliapussycatering
ebuzentsujiiepvhadselfiphdfcbankazunoticiashinkamigototalpvtrentinosuedtirolpwchungnamdalseidsbergmodellingmxn--11b4c3dray-dnsupdaterpzqhaebaruericssongdalenviknakayamaoris-a-cubicle-slavellinodeobjectshinshinotsurfashionstorebaselburguidefinimamateramochizukimobetsumidatlantichirurgiens-dentistes-en-franceqldqotoyohashimotoshimatsuzakis-an-accountantshowtimelbourneqponiatowadaqslgbtrentinsud-tirolqualifioappippueblockbusternopilawaquickconnectrentinsudtirolquicksytesrhtrentinsued-tirolquipelementsrltunestuff-4-saletunkonsulatrobeebyteappigboatsmolaquilanxessmushcdn77-sslingturystykaniepcetuscanytushuissier-justicetuvalleaostaverntuxfamilytwmailvestvagoyvevelstadvibo-valentiavibovalentiavideovillastufftoread-booksnestorfjordvinnicasadelamonedagestangevinnytsiavipsinaappiwatevirginiavirtual-uservecounterstrikevirtualcloudvirtualservervirtualuserveexchangevirtuelvisakuhokksundviterbolognagasakikonaikawagoevivianvivolkenkundenvixn--42c2d9avlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavminanovologdanskonyveloftrentino-stirolvolvolkswagentstuttgartrentinsuedtirolvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiecircustomer-ocimmobilienwixsitewloclawekoobindalwmcloudwmflabsurnadalwoodsidelmenhorstabackyardsurreyworse-thandawowithyoutuberspacekitagawawpdevcloudwpenginepoweredwphostedmailwpmucdnpixolinodeusercontentrentinosudtirolwpmudevcdnaccessokanagawawritesthisblogoipizzawroclawiwatsukiyonoshiroomgwtcirclerkstagewtfastvps-serverisignwuozuwzmiuwajimaxn--4gbriminingxn--4it168dxn--4it797kooris-a-libertarianxn--4pvxs4allxn--54b7fta0ccivilaviationredumbrellajollamericanexpressexyxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49civilisationrenderxn--5rtq34koperviklabudhabikinokawachinaganoharamcocottempurlxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264civilizationthewifiatmallorcafederation-webspacexn--80aaa0cvacationsusonoxn--80adxhksuzakananiimiharuxn--80ao21axn--80aqecdr1axn--80asehdbarclays3-us-east-2xn--80
aswgxn--80aukraanghkembuchikujobservableusercontentrevisohughestripperxn--8dbq2axn--8ltr62koryokamikawanehonbetsuwanouchijiwadeliveryxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisenbahnxn--90a3academiamicable-modemoneyxn--90aeroportalabamagasakishimabaraffleentry-snowplowiczeladzxn--90aishobarakawaharaoxn--90amckinseyxn--90azhytomyrxn--9dbhblg6dietritonxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byandexcloudxn--asky-iraxn--aurskog-hland-jnbarefootballooningjerstadgcapebretonamicrolightingjesdalombardiadembroideryonagunicloudiherokuappanamasteiermarkaracoldwarszawauthgearappspacehosted-by-previderxn--avery-yuasakuragawaxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsuzukanazawaxn--bck1b9a5dre4civilwarmiasadoesntexisteingeekarpaczest-a-la-maisondre-landrayddns5yxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyaotsurgeryxn--bjddar-ptargithubpreviewsaitohmannore-og-uvdalxn--blt-elabourxn--bmlo-graingerxn--bod-2naturalsciencesnaturellesuzukis-an-actorxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-acornxn--brum-voagatroandinosaureportrentoyonakagyokutoyakomaganexn--btsfjord-9zaxn--bulsan-sdtirol-nsbaremetalpha-myqnapcloud9guacuiababia-goracleaningitpagexlimoldell-ogliastraderxn--c1avgxn--c2br7gxn--c3s14mincomcastreserve-onlinexn--cck2b3bargainstances3-us-gov-west-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-an-actresshwindmillxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--czr694barreaudiblebesbydgoszczecinemagnethnologyoriikaragandauthordalandroiddnss3-ap-southeast-2ix4432-balsan-suedtirolimiteddnskinggfakefurniturecreationavuotnaritakoelnayorovigotsukisosakitahatakahatakaishimoichinosekigaharaurskog-holandingitlaborxn--czrs0trogstadxn--czru2dxn--czrw28barrel-of-knowledgeappgafanquanpachicappacificurussiautomotivelandds3-ca-central-16-balsan-sudtiro
llagdenesnaaseinet-freaks3-ap-southeast-123websiteleaf-south-123webseiteckidsmynasushiobarackmazerbaijan-mayen-rootaribeiraogakibichuobiramusementdllpages3-ap-south-123sitewebhareidfjordvagsoyerhcloudd-dnsiskinkyolasiteastcoastaldefenceastus2038xn--d1acj3barrell-of-knowledgecomputerhistoryofscience-fictionfabricafjs3-us-west-1xn--d1alfaromeoxn--d1atromsakegawaxn--d5qv7z876clanbibaidarmeniaxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kosaigawaxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmailukowhitesnow-dnsangohtawaramotoineppubtlsanjotelulubin-brbambinagisobetsuitagajoburgjerdrumcprequalifymein-vigorgebetsukuibmdeveloperauniteroizumizakinderoyomitanobninskanzakiyokawaraustrheimatunduhrennebulsan-suedtirololitapunk123kotisivultrobjectselinogradimo-siemenscaledekaascolipiceno-ipifony-1337xn--eckvdtc9dxn--efvn9svalbardunloppaderbornxn--efvy88hagakhanamigawaxn--ehqz56nxn--elqq16hagebostadxn--eveni-0qa01gaxn--f6qx53axn--fct429kosakaerodromegallupaasdaburxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsvchurchaseljeepsondriodejaneirockyotobetsuliguriaxn--fiq64barsycenterprisesakievennodesadistcgrouplidlugolekagaminord-frontierxn--fiqs8sveioxn--fiqz9svelvikoninjambylxn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbssvizzeraxn--forlcesena-c8axn--fpcrj9c3dxn--frde-grandrapidsvn-repostorjcloud-ver-jpchowderxn--frna-woaraisaijosoyroroswedenxn--frya-hraxn--fzc2c9e2cleverappsannanxn--fzys8d69uvgmailxn--g2xx48clicketcloudcontrolapparmatsuuraxn--gckr3f0fauskedsmokorsetagayaseralingenoamishirasatogliattipschulserverxn--gecrj9clickrisinglesannohekinannestadraydnsanokaruizawaxn--ggaviika-8ya47haibarakitakamiizumisanofidelitysfjordxn--gildeskl-g0axn--givuotna-8yasakaiminatoyookaneyamazoexn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-an-anarchistoricalsocietysnesigdalxn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45br5cylxn--gnstigliefern-wobihirosakikamijimatsushigexn--h-2failxn--h1aeghair-surveillancexn--h1ahnxn--h1alizxn--h2breg3eveneswidnicasacampina
grandebungotakadaemongolianxn--h2brj9c8clinichippubetsuikilatironporterxn--h3cuzk1digickoseis-a-linux-usershoujis-a-knightpointtohoboleslawieconomiastalbanshizuokamogawaxn--hbmer-xqaxn--hcesuolo-7ya35barsyonlinewhampshirealtychyattorneyagawakuyabukihokumakogeniwaizumiotsurugimbalsfjordeportexaskoyabeagleboardetroitskypecorivneatonoshoes3-eu-west-3utilitiesquare7xn--hebda8basicserversaillesjabbottateshinanomachildrensgardenhlfanhsbc66xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-aptibleangaviikadenaamesjevuemielnoboribetsuckswidnikkolobrzegersundxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasugithubusercontentromsojamisonxn--io0a7is-an-artistgstagexn--j1adpkomonotogawaxn--j1aefbsbxn--1lqs71dyndns-office-on-the-webhostingrpassagensavonarviikamiokameokamakurazakiwakunigamihamadaxn--j1ael8basilicataniautoscanadaeguambulancentralus-2xn--j1amhakatanorthflankddiamondshinshiroxn--j6w193gxn--jlq480n2rgxn--jlq61u9w7basketballfinanzgorzeleccodespotenzakopanewspaperxn--jlster-byasuokannamihokkaidopaaskvollxn--jrpeland-54axn--jvr189miniserversusakis-a-socialistg-builderxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45brj9cistrondheimperiaxn--koluokta-7ya57hakodatexn--kprw13dxn--kpry57dxn--kput3is-an-engineeringxn--krager-gyatominamibosogndalxn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdevcloudfunctionsimplesitexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsukanoyakagexn--kvnangen-k0axn--l-1fairwindswiebodzin-dslattuminamiyamashirokawanabeepilepsykkylvenicexn--l1accentureklamborghinikolaeventswinoujscienceandhistoryxn--laheadju-7yatsushiroxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52batochigifts3-us-west-2xn--lesund-huaxn--lgbbat1ad8jdfaststackschulplattformetacentrumeteorappassenger-associationxn--lgrd-poacctrusteexn--lhppi-xqaxn--linds-pramericanartrve
stnestudioxn--lns-qlavagiskexn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacliniquedapliexn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddeswisstpetersburgxn--mgb9awbfbx-ostrowwlkpmguitarschwarzgwangjuifminamidaitomanchesterxn--mgba3a3ejtrycloudflarevistaplestudynamic-dnsrvaroyxn--mgba3a4f16axn--mgba3a4fra1-deloittevaksdalxn--mgba7c0bbn0axn--mgbaakc7dvfstdlibestadxn--mgbaam7a8hakonexn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscordsays3-website-ap-northeast-1xn--mgbai9azgqp6jejuniperxn--mgbayh7gpalmaseratis-an-entertainerxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskosherbrookegawaxn--mgbqly7c0a67fbclintonkotsukubankarumaifarmsteadrobaknoluoktachikawakayamadridvallee-aosteroyxn--mgbqly7cvafr-1xn--mgbt3dhdxn--mgbtf8flapymntrysiljanxn--mgbtx2bauhauspostman-echocolatemasekd1xn--mgbx4cd0abbvieeexn--mix082fbxoschweizxn--mix891fedorainfraclouderaxn--mjndalen-64axn--mk0axin-vpnclothingdustdatadetectjmaxxxn--12c1fe0bradescotlandrrxn--mk1bu44cn-northwest-1xn--mkru45is-bykleclerchoshibuyachiyodancexn--mlatvuopmi-s4axn--mli-tlavangenxn--mlselv-iuaxn--moreke-juaxn--mori-qsakurais-certifiedxn--mosjen-eyawaraxn--mot-tlazioxn--mre-og-romsdal-qqbuseranishiaritakurashikis-foundationxn--msy-ula0hakubaghdadultravelchannelxn--mtta-vrjjat-k7aflakstadaokagakicks-assnasaarlandxn--muost-0qaxn--mxtq1minisitexn--ngbc5azdxn--ngbe9e0axn--ngbrxn--45q11citadelhicampinashikiminohostfoldnavyxn--nit225koshimizumakiyosunnydayxn--nmesjevuemie-tcbalestrandabergamoarekeymachineustarnbergxn--nnx388axn--nodessakyotanabelaudiopsysynology-dstreamlitappittsburghofficialxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeserveftplanetariuminamitanexn--nvuotna-hwaxn--nyqy26axn--o1achernihivgubsxn--o3cw4hakuis-a-democratravelersinsurancexn--o3cyx2axn--od0algxn--od0aq3belementorayoshiokanumazuryukuhashimojibxos3-website-ap-southeast-1xn--ogbpf8flatangerxn--oppegrd
-ixaxn--ostery-fyawatahamaxn--osyro-wuaxn--otu796dxn--p1acfedorapeoplegoismailillehammerfeste-ipatriaxn--p1ais-gonexn--pgbs0dhlx3xn--porsgu-sta26fedoraprojectoyotsukaidoxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cngreaterxn--qcka1pmcpenzaporizhzhiaxn--qqqt11minnesotaketakayamassivegridxn--qxa6axn--qxamsterdamnserverbaniaxn--rady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0nabaris-into-animeetrentin-sued-tirolxn--rennesy-v1axn--rhkkervju-01afeiraquarelleasingujaratoyouraxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturbruksgymnxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hakusanagochihayaakasakawaiishopitsitexn--rovu88bellevuelosangeles3-website-ap-southeast-2xn--rros-granvindafjordxn--rskog-uuaxn--rst-0naturhistorischesxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithaldenxn--s9brj9cnpyatigorskolecznagatorodoyxn--sandnessjen-ogbellunord-odalombardyn53xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4dbgdty6citichernovtsyncloudrangedaluccarbonia-iglesias-carboniaiglesiascarboniaxn--skierv-utazasxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5natuurwetenschappenginexn--slt-elabcieszynh-servebeero-stageiseiroumuenchencoreapigeelvinckoshunantankmpspawnextdirectrentino-s-tirolxn--smla-hraxn--smna-gratangentlentapisa-geekosugexn--snase-nraxn--sndre-land-0cbeneventochiokinoshimaintenancebinordreisa-hockeynutazurestaticappspaceusercontentateyamaveroykenglandeltaitogitsumitakagiizeasypanelblagrarchaeologyeongbuk0emmafann-arboretumbriamallamaceiobbcg123homepagefrontappchizip61123minsidaarborteaches-yogasawaracingroks-theatree123hjemmesidealerimo-i-rana4u2-localhistorybolzano-altoadigeometre-experts-comptables3-ap-northeast-123miwebcambridgehirn4t3l3p0rtarumizusawabogadobeaemcloud-fr123paginaweberkeleyokosukanrabruzzombieidskoguchikushinonsenasakuchinotsuchiurakawafaicloudineat-url-o-g-i-naval-d-aosta-valleyokote164-b-datacentermezproxyzgoraetnabudejjudaicadaquest-mon-
blogueurodirumaceratabuseating-organicbcn-north-123saitamakawabartheshopencraftrainingdyniajuedischesapeakebayernavigationavoi234lima-cityeats3-ap-northeast-20001wwwedeployokozeastasiamunemurorangecloudplatform0xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbentleyurihonjournalistjohnikonanporovnobserverxn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bulls-fanxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbeppublishproxyusuharavocatanzarowegroweiboltashkentatamotorsitestingivingjemnes3-eu-central-1kappleadpages-12hpalmspringsakerxn--stre-toten-zcbeskidyn-ip24xn--t60b56axn--tckweddingxn--tiq49xqyjelasticbeanstalkhmelnitskiyamarumorimachidaxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbestbuyshoparenagareyamaizurugbyenvironmentalconservationflashdrivefsnillfjordiscordsezjampaleoceanographics3-website-eu-west-1xn--trentin-sdtirol-7vbetainaboxfuseekloges3-website-sa-east-1xn--trentino-sd-tirol-c3bhzcasertainaioirasebastopologyeongnamegawafflecellclstagemologicaliforniavoues3-eu-west-1xn--trentino-sdtirol-szbielawalbrzycharitypedreamhostersvp4xn--trentinosd-tirol-rzbiellaakesvuemieleccebizenakanotoddeninoheguriitatebayashiibahcavuotnagaivuotnagaokakyotambabybluebitelevisioncilla-speziaxarnetbank8s3-eu-west-2xn--trentinosdtirol-7vbieszczadygeyachimataijiiyamanouchikuhokuryugasakitaurayasudaxn--trentinsd-tirol-6vbievat-band-campaignieznombrendlyngengerdalces3-website-us-east-1xn--trentinsdtirol-nsbifukagawalesundiscountypeformelhusgardeninomiyakonojorpelandiscourses3-website-us-west-1xn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvestre-slidrexn--uc0ay4axn--uist22halsakakinokiaxn--uisz3gxn--unjrga-rtarnobrzegyptianxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbtularvikonskowolayangroupiemontexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccerxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbigvalledaostaoba
omoriguchiharag-cloud-championshiphoplixboxenirasakincheonishiazaindependent-commissionishigouvicasinordeste-idclkarasjohkamikitayamatsurindependent-inquest-a-la-masionishiharaxn--vestvgy-ixa6oxn--vg-yiabkhaziaxn--vgan-qoaxn--vgsy-qoa0jelenia-goraxn--vgu402cnsantabarbaraxn--vhquvestre-totennishiawakuraxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861biharstadotsubetsugaruhrxn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1cntjomeldaluroyxn--wgbl6axn--xhq521bihorologyusuisservegame-serverxn--xkc2al3hye2axn--xkc2dl3a5ee0hammarfeastafricaravantaaxn--y9a3aquariumintereitrentino-sudtirolxn--yer-znaumburgxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4dbrk0cexn--ystre-slidre-ujbikedaejeonbukarasjokarasuyamarriottatsunoceanographiquehimejindependent-inquiryuufcfanishiizunazukindependent-panelomoliseminemrxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bilbaogashimadachicagoboavistanbulsan-sudtirolbia-tempio-olbiatempioolbialystokkeliwebredirectme-south-1xnbayxz
\ No newline at end of file
+birkenesoddtangentinglogoweirbitbucketrzynishikatakayamatta-varjjatjomembersaltdalovepopartysfjordiskussionsbereichatinhlfanishikatsuragitappassenger-associationishikawazukamiokameokamakurazakitaurayasudabitternidisrechtrainingloomy-routerbjarkoybjerkreimdbalsan-suedtirololitapunkapsienamsskoganeibmdeveloperauniteroirmemorialombardiadempresashibetsukumiyamagasakinderoyonagunicloudevelopmentaxiijimarriottayninhaccanthobby-siteval-d-aosta-valleyoriikaracolognebinatsukigataiwanumatajimidsundgcahcesuolocustomer-ocimperiautoscanalytics-gatewayonagoyaveroykenflfanpachihayaakasakawaiishopitsitemasekd1kappenginedre-eikerimo-siemenscaledekaascolipicenoboribetsucks3-eu-west-3utilities-16-balestrandabergentappsseekloges3-eu-west-123paginawebcamauction-acornfshostrodawaraktyubinskaunicommbank123kotisivultrobjectselinogradimo-i-rana4u2-localhostrolekanieruchomoscientistordal-o-g-i-nikolaevents3-ap-northeast-2-ddnsking123homepagefrontappchizip61123saitamakawababia-goracleaningheannakadomarineat-urlimanowarudakuneustarostwodzislawdev-myqnapcloudcontrolledgesuite-stagingdyniamusementdllclstagehirnikonantomobelementorayokosukanoyakumoliserniaurland-4-salernord-aurdalipaywhirlimiteddnslivelanddnss3-ap-south-123siteweberlevagangaviikanonji234lima-cityeats3-ap-southeast-123webseiteambulancechireadmyblogspotaribeiraogakicks-assurfakefurniturealmpmninoheguribigawaurskog-holandinggfarsundds3-ap-southeast-20001wwwedeployokote123hjemmesidealerdalaheadjuegoshikibichuobiraustevollimombetsupplyokoze164-balena-devices3-ca-central-123websiteleaf-south-12hparliamentatsunobninsk8s3-eu-central-1337bjugnishimerablackfridaynightjxn--11b4c3ditchyouripatriabloombergretaijindustriesteinkjerbloxcmsaludivtasvuodnakaiwanairlinekobayashimodatecnologiablushakotanishinomiyashironomniwebview-assetsalvadorbmoattachmentsamegawabmsamnangerbmwellbeingzonebnrweatherchannelsdvrdnsamparalleluxenishinoomotegotsukishiwadavvenjargamvikarpaczest-a-la-maisondre-landivttasvuotnakamai-stagingloppennebomlocalzonebonavstacka
rtuzybondigitaloceanspacesamsclubartowest1-usamsunglugsmall-webspacebookonlineboomlaakesvuemielecceboschristmasakilatiron-riopretoeidsvollovesickaruizawabostik-serverrankoshigayachtsandvikcoromantovalle-d-aostakinouebostonakijinsekikogentlentapisa-geekarumaifmemsetkmaxxn--12c1fe0bradescotksatmpaviancapitalonebouncemerckmsdscloudiybounty-fullensakerrypropertiesangovtoyosatoyokawaboutiquebecologialaichaugiangmbhartiengiangminakamichiharaboutireservdrangedalpusercontentoyotapfizerboyfriendoftheinternetflixn--12cfi8ixb8lublindesnesanjosoyrovnoticiasannanishinoshimattelemarkasaokamikitayamatsurinfinitigopocznore-og-uvdalucaniabozen-sudtiroluccanva-appstmnishiokoppegardray-dnsupdaterbozen-suedtirolukowesteuropencraftoyotomiyazakinsurealtypeformesswithdnsannohekinanporovigonohejinternationaluroybplacedogawarabikomaezakirunordkappgfoggiabrandrayddns5ybrasiliadboxoslockerbresciaogashimadachicappadovaapstemp-dnswatchest-mon-blogueurodirumagazinebrindisiciliabroadwaybroke-itvedestrandraydnsanokashibatakashimashikiyosatokigawabrokerbrothermesserlifestylebtimnetzpisdnpharmaciensantamariakebrowsersafetymarketingmodumetacentrumeteorappharmacymruovatlassian-dev-builderschaefflerbrumunddalutskashiharabrusselsantoandreclaimsanukintlon-2bryanskiptveterinaireadthedocsaobernardovre-eikerbrynebwestus2bzhitomirbzzwhitesnowflakecommunity-prochowicecomodalenissandoycompanyaarphdfcbankasumigaurawa-mazowszexn--1ck2e1bambinagisobetsuldalpha-myqnapcloudaccess3-us-east-2ixboxeroxfinityolasiteastus2comparemarkerryhotelsaves-the-whalessandria-trani-barletta-andriatranibarlettaandriacomsecaasnesoddeno-stagingrondarcondoshifteditorxn--1ctwolominamatarnobrzegrongrossetouchijiwadedyn-berlincolnissayokoshibahikariyaltakazakinzais-a-bookkeepermarshallstatebankasuyalibabahccavuotnagaraholtaleniwaizumiotsurugashimaintenanceomutazasavonarviikaminoyamaxunispaceconferenceconstructionflashdrivefsncf-ipfsaxoconsuladobeio-static-accesscamdvrcampaniaconsultantranoyconsultingroundhandlingroznysaitohnoshookuwanaka
yamangyshlakdnepropetrovskanlandyndns-freeboxostrowwlkpmgrphilipsyno-dschokokekscholarshipschoolbusinessebycontactivetrailcontagematsubaravendbambleborkdalvdalcest-le-patron-rancherkasydneyukuhashimokawavoues3-sa-east-1contractorskenissedalcookingruecoolblogdnsfor-better-thanhhoarairforcentralus-1cooperativano-frankivskodjeephonefosschoolsztynsetransiphotographysiocoproductionschulplattforminamiechizenisshingucciprianiigatairaumalatvuopmicrolightinguidefinimaringatlancastercorsicafjschulservercosenzakopanecosidnshome-webservercellikescandypopensocialcouchpotatofrieschwarzgwangjuh-ohtawaramotoineppueblockbusternopilawacouncilcouponscrapper-sitecozoravennaharimalborkaszubytemarketscrappinguitarscrysecretrosnubananarepublic-inquiryurihonjoyenthickaragandaxarnetbankanzakiwielunnerepairbusanagochigasakishimabarakawaharaolbia-tempio-olbiatempioolbialowiezachpomorskiengiangjesdalolipopmcdirepbodyn53cqcxn--1lqs03niyodogawacrankyotobetsumidaknongujaratmallcrdyndns-homednscwhminamifuranocreditcardyndns-iphutholdingservehttpbincheonl-ams-1creditunionionjukujitawaravpagecremonashorokanaiecrewhoswholidaycricketnedalcrimeast-kazakhstanangercrotonecrowniphuyencrsvp4cruiseservehumourcuisinellair-traffic-controllagdenesnaaseinet-freakserveircasertainaircraftingvolloansnasaarlanduponthewifidelitypedreamhostersaotomeldaluxurycuneocupcakecuritibacgiangiangryggeecurvalled-aostargets-itranslatedyndns-mailcutegirlfriendyndns-office-on-the-webhoptogurafedoraprojectransurlfeirafembetsukuis-a-bruinsfanfermodenakasatsunairportrapaniizaferraraferraris-a-bulls-fanferrerotikagoshimalopolskanittedalfetsundyndns-wikimobetsumitakagildeskaliszkolamericanfamilydservemp3fgunmaniwamannorth-kazakhstanfhvalerfilegear-augustowiiheyakagefilegear-deatnuniversitysvardofilegear-gbizfilegear-iefilegear-jpmorgangwonporterfilegear-sg-1filminamiizukamiminefinalchikugokasellfyis-a-candidatefinancefinnoyfirebaseappiemontefirenetlifylkesbiblackbaudcdn-edgestackhero-networkinggroupowiathletajimabaria-vungtaudiopsysha
rpigboatshawilliamhillfirenzefirestonefireweblikes-piedmontravelersinsurancefirmdalegalleryfishingoldpoint2thisamitsukefitjarfitnessettsurugiminamimakis-a-catererfjalerfkatsushikabeebyteappilottonsberguovdageaidnunjargausdalflekkefjordyndns-workservep2phxn--1lqs71dyndns-remotewdyndns-picserveminecraftransporteflesbergushikamifuranorthflankatsuyamashikokuchuoflickragerokunohealthcareershellflierneflirfloginlinefloppythonanywherealtorfloraflorencefloripalmasfjordenfloristanohatajiris-a-celticsfanfloromskogxn--2m4a15eflowershimokitayamafltravinhlonganflynnhosting-clusterfncashgabadaddjabbottoyourafndyndns1fnwkzfolldalfoolfor-ourfor-somegurownproviderfor-theaterfordebianforexrotheworkpccwinbar0emmafann-arborlandd-dnsiskinkyowariasahikawarszawashtenawsmppl-wawsglobalacceleratorahimeshimakanegasakievennodebalancern4t3l3p0rtatarantours3-ap-northeast-123minsidaarborteaches-yogano-ipifony-123miwebaccelastx4432-b-datacenterprisesakijobservableusercontentateshinanomachintaifun-dnsdojournalistoloseyouriparisor-fronavuotnarashinoharaetnabudejjunipereggio-emilia-romagnaroyboltateyamajureggiocalabriakrehamnayoro0o0forgotdnshimonitayanagithubpreviewsaikisarazure-mobileirfjordynnservepicservequakeforli-cesena-forlicesenaforlillehammerfeste-ipimientaketomisatoolshimonosekikawaforsalegoismailillesandefjordynservebbservesarcasmileforsandasuolodingenfortalfortefosneshimosuwalkis-a-chefashionstorebaseljordyndns-serverisignfotrdynulvikatowicefoxn--2scrj9casinordlandurbanamexnetgamersapporomurafozfr-1fr-par-1fr-par-2franamizuhoboleslawiecommerce-shoppingyeongnamdinhachijohanamakisofukushimaoris-a-conservativegarsheiheijis-a-cparachutingfredrikstadynv6freedesktopazimuthaibinhphuocelotenkawakayamagnetcieszynh-servebeero-stageiseiroumugifuchungbukharag-cloud-championshiphoplixn--30rr7yfreemyiphosteurovisionredumbrellangevagrigentobishimadridvagsoygardenebakkeshibechambagricoharugbydgoszczecin-berlindasdaburfreesitefreetlshimotsukefreisennankokubunjis-a-cubicle-slavellinodeobjectshimotsumafren
chkisshikindleikangerfreseniushinichinanfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganshinjotelulubin-vpncateringebunkyonanaoshimamateramockashiwarafrognfrolandynvpnpluservicesevastopolitiendafrom-akamaized-stagingfrom-alfrom-arfrom-azurewebsiteshikagamiishibuyabukihokuizumobaragusabaerobaticketshinjukuleuvenicefrom-campobassociatest-iserveblogsytenrissadistdlibestadultrentin-sudtirolfrom-coachaseljeducationcillahppiacenzaganfrom-ctrentin-sued-tirolfrom-dcatfooddagestangefrom-decagliarikuzentakataikillfrom-flapymntrentin-suedtirolfrom-gap-east-1from-higashiagatsumagoianiafrom-iafrom-idyroyrvikingulenfrom-ilfrom-in-the-bandairtelebitbridgestonemurorangecloudplatform0from-kshinkamigototalfrom-kyfrom-langsonyantakahamalselveruminamiminowafrom-malvikaufentigerfrom-mdfrom-mein-vigorlicefrom-mifunefrom-mnfrom-modshinshinotsurgeryfrom-mshinshirofrom-mtnfrom-ncatholicurus-4from-ndfrom-nefrom-nhs-heilbronnoysundfrom-njshintokushimafrom-nminamioguni5from-nvalledaostargithubusercontentrentino-a-adigefrom-nycaxiaskvollpagesardegnarutolgaulardalvivanovoldafrom-ohdancefrom-okegawassamukawataris-a-democratrentino-aadigefrom-orfrom-panasonichernovtsykkylvenneslaskerrylogisticsardiniafrom-pratohmamurogawatsonrenderfrom-ris-a-designerimarugame-hostyhostingfrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--32vp30hachinoheavyfrom-utsiracusagaeroclubmedecin-addrammenuorodoyerfrom-val-daostavalleyfrom-vtrentino-alto-adigefrom-wafrom-wiardwebthingsjcbnpparibashkiriafrom-wvallee-aosteroyfrom-wyfrosinonefrostabackplaneapplebesbyengerdalp1froyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairtrafficplexus-2fujinomiyadapliefujiokazakinkobearalvahkikonaibetsubame-south-1fujisatoshoeshintomikasaharafujisawafujishiroishidakabiratoridediboxn--3bst00minamisanrikubetsupportrentino-altoadigefujitsuruokakamigaharafujiyoshidappnodea
rthainguyenfukayabeardubaikawagoefukuchiyamadatsunanjoburgfukudomigawafukuis-a-doctorfukumitsubishigakirkeneshinyoshitomiokamisatokamachippubetsuikitchenfukuokakegawafukuroishikariwakunigamigrationfukusakirovogradoyfukuyamagatakaharunusualpersonfunabashiriuchinadattorelayfunagatakahashimamakiryuohkurafunahashikamiamakusatsumasendaisenergyeongginowaniihamatamakinoharafundfunkfeuerfuoiskujukuriyamandalfuosskoczowindowskrakowinefurubirafurudonordreisa-hockeynutwentertainmentrentino-s-tirolfurukawajimangolffanshiojirishirifujiedafusoctrangfussagamiharafutabayamaguchinomihachimanagementrentino-stirolfutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinais-a-financialadvisor-aurdalfuturecmshioyamelhushirahamatonbetsurnadalfuturehostingfuturemailingfvghakuis-a-gurunzenhakusandnessjoenhaldenhalfmoonscalebookinghostedpictetrentino-sud-tirolhalsakakinokiaham-radio-opinbar1hamburghammarfeastasiahamurakamigoris-a-hard-workershiraokamisunagawahanamigawahanawahandavvesiidanangodaddyn-o-saurealestatefarmerseinehandcrafteducatorprojectrentino-sudtirolhangglidinghangoutrentino-sued-tirolhannannestadhannosegawahanoipinkazohanyuzenhappouzshiratakahagianghasamap-northeast-3hasaminami-alpshishikuis-a-hunterhashbanghasudazaifudaigodogadobeioruntimedio-campidano-mediocampidanomediohasura-appinokokamikoaniikappudopaashisogndalhasvikazteleportrentino-suedtirolhatogayahoooshikamagayaitakamoriokakudamatsuehatoyamazakitahiroshimarcheapartmentshisuifuettertdasnetzhatsukaichikaiseiyoichipshitaramahattfjelldalhayashimamotobusells-for-lesshizukuishimoichilloutsystemscloudsitehazuminobushibukawahelplfinancialhelsinkitakamiizumisanofidonnakamurataitogliattinnhemneshizuokamitondabayashiogamagoriziahemsedalhepforgeblockshoujis-a-knightpointtokaizukamaishikshacknetrentinoa-adigehetemlbfanhigashichichibuzentsujiiehigashihiroshimanehigashiizumozakitakatakanabeautychyattorneyagawakkanaioirasebastopoleangaviikadenagahamaroyhigashikagawahigashikagurasoedahigashikawakitaaikitakyushunantankazunovecorebungoo
now-dnshowahigashikurumeinforumzhigashimatsushimarnardalhigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshowtimeloyhigashinarusells-for-uhigashinehigashiomitamanoshiroomghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitamihamadahigashitsunospamproxyhigashiurausukitamotosunnydayhigashiyamatokoriyamanashiibaclieu-1higashiyodogawahigashiyoshinogaris-a-landscaperspectakasakitanakagusukumoldeliveryhippyhiraizumisatohokkaidontexistmein-iservschulecznakaniikawatanagurahirakatashinagawahiranais-a-lawyerhirarahiratsukaeruhirayaizuwakamatsubushikusakadogawahitachiomiyaginozawaonsensiositehitachiotaketakaokalmykiahitraeumtgeradegreehjartdalhjelmelandholyhomegoodshwinnersiiitesilkddiamondsimple-urlhomeipioneerhomelinkyard-cloudjiffyresdalhomelinuxn--3ds443ghomeofficehomesecuritymacaparecidahomesecuritypchiryukyuragiizehomesenseeringhomeskleppippugliahomeunixn--3e0b707ehondahonjyoitakarazukaluganskfh-muensterhornindalhorsells-itrentinoaadigehortendofinternet-dnsimplesitehospitalhotelwithflightsirdalhotmailhoyangerhoylandetakasagooglecodespotrentinoalto-adigehungyenhurdalhurumajis-a-liberalhyllestadhyogoris-a-libertarianhyugawarahyundaiwafuneis-very-evillasalleitungsenis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandoomdnstraceisk01isk02jenv-arubacninhbinhdinhktistoryjeonnamegawajetztrentinostiroljevnakerjewelryjgorajlljls-sto1jls-sto2jls-sto3jmpixolinodeusercontentrentinosud-tiroljnjcloud-ver-jpchitosetogitsuliguriajoyokaichibahcavuotnagaivuotnagaokakyotambabymilk3jozis-a-musicianjpnjprsolarvikhersonlanxessolundbeckhmelnitskiyamasoykosaigawakosakaerodromegalloabatobamaceratachikawafaicloudineencoreapigeekoseis-a-painterhostsolutionslupskhakassiakosheroykoshimizumakis-a-patsfankoshughesomakosugekotohiradomainstitutekotourakouhokumakogenkounosupersalevangerkouyamasudakouzushimatrixn--3pxu8khplaystation-cloudyclusterkozagawakozakis-a-personaltrainerkozowiosomnarviklabudhabikinokawachinaganoharamcocottekpnkppspbarcel
onagawakepnord-odalwaysdatabaseballangenkainanaejrietisalatinabenogiehtavuoatnaamesjevuemielnombrendlyngen-rootaruibxos3-us-gov-west-1krasnikahokutokonamegatakatoris-a-photographerokussldkrasnodarkredstonekrelliankristiansandcatsoowitdkmpspawnextdirectrentinosudtirolkristiansundkrodsheradkrokstadelvaldaostavangerkropyvnytskyis-a-playershiftcryptonomichinomiyakekryminamiyamashirokawanabelaudnedalnkumamotoyamatsumaebashimofusakatakatsukis-a-republicanonoichinosekigaharakumanowtvaokumatorinokumejimatsumotofukekumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-rockstarachowicekunitachiaraisaijolsterkunitomigusukukis-a-socialistgstagekunneppubtlsopotrentinosued-tirolkuokgroupizzakurgankurobegetmyipirangalluplidlugolekagaminorddalkurogimimozaokinawashirosatochiokinoshimagentositempurlkuroisodegaurakuromatsunais-a-soxfankuronkurotakikawasakis-a-studentalkushirogawakustanais-a-teacherkassyncloudkusuppliesor-odalkutchanelkutnokuzumakis-a-techietipslzkvafjordkvalsundkvamsterdamnserverbaniakvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsor-varangermishimatsusakahogirlymisugitokorozawamitakeharamitourismartlabelingmitoyoakemiuramiyazurecontainerdpoliticaobangmiyotamatsukuris-an-actormjondalenmonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenapolicemoriyamatsuuramoriyoshiminamiashigaramormonstermoroyamatsuzakis-an-actressmushcdn77-sslingmortgagemoscowithgoogleapiszmoseushimogosenmosjoenmoskenesorreisahayakawakamiichikawamisatottoris-an-anarchistjordalshalsenmossortlandmosviknx-serversusakiyosupabaseminemotegit-reposoruminanomoviemovimientokyotangotembaixadattowebhareidsbergmozilla-iotrentinosuedtirolmtranbytomaridagawalmartrentinsud-tirolmuikaminokawanishiaizubangemukoelnmunakatanemuosattemupkomatsushimassa-carrara-massacarraramassabuzzmurmanskomforbar2murotorcraftranakatombetsumy-gatewaymusashinodesakegawamuseumincomcastoripressorfoldmusicapetownnews-stagingmutsuzawamy-vigormy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoundca
storjdevcloudfunctionsokndalmydattolocalcertificationmyddnsgeekgalaxymydissentrentinsudtirolmydobissmarterthanyoumydrobofageometre-experts-comptablesowamydspectruminisitemyeffectrentinsued-tirolmyfastly-edgekey-stagingmyfirewalledreplittlestargardmyforuminterecifedextraspace-to-rentalstomakomaibaramyfritzmyftpaccesspeedpartnermyhome-servermyjinomykolaivencloud66mymailermymediapchoseikarugalsacemyokohamamatsudamypeplatformsharis-an-artistockholmestrandmypetsphinxn--41amyphotoshibajddarvodkafjordvaporcloudmypictureshinomypsxn--42c2d9amysecuritycamerakermyshopblockspjelkavikommunalforbundmyshopifymyspreadshopselectrentinsuedtirolmytabitordermythic-beastspydebergmytis-a-anarchistg-buildermytuleap-partnersquaresindevicenzamyvnchoshichikashukudoyamakeuppermywirecipescaracallypoivronpokerpokrovskommunepolkowicepoltavalle-aostavernpomorzeszowithyoutuberspacekitagawaponpesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-bykleclerchitachinakagawaltervistaipeigersundynamic-dnsarlpordenonepornporsangerporsangugeporsgrunnanpoznanpraxihuanprdprgmrprimetelprincipeprivatelinkomonowruzhgorodeoprivatizehealthinsuranceprofesionalprogressivegasrlpromonza-e-della-brianzaptokuyamatsushigepropertysnesrvarggatrevisogneprotectionprotonetroandindependent-inquest-a-la-masionprudentialpruszkowiwatsukiyonotaireserve-onlineprvcyonabarumbriaprzeworskogpunyufuelpupulawypussycatanzarowixsitepvhachirogatakahatakaishimojis-a-geekautokeinotteroypvtrogstadpwchowderpzqhadanorthwesternmutualqldqotoyohashimotoshimaqponiatowadaqslgbtroitskomorotsukagawaqualifioapplatter-applatterplcube-serverquangngais-certifiedugit-pagespeedmobilizeroticaltanissettailscaleforcequangninhthuanquangtritonoshonais-foundationquickconnectromsakuragawaquicksytestreamlitapplumbingouvaresearchitectesrhtrentoyonakagyokutoyakomakizunokunimimatakasugais-an-engineeringquipelementstrippertuscanytushungrytuvalle-daostamayukis-into-animeiwamizawatuxfamilytuyenquangbinhthuantwmailvestnesuzukis-gonevestre-slidreggio-calabriavestre-tote
nnishiawakuravestvagoyvevelstadvibo-valentiaavibovalentiavideovinhphuchromedicinagatorogerssarufutsunomiyawakasaikaitakokonoevinnicarbonia-iglesias-carboniaiglesiascarboniavinnytsiavipsinaapplurinacionalvirginanmokurennebuvirtual-userveexchangevirtualservervirtualuserveftpodhalevisakurais-into-carsnoasakuholeckodairaviterboliviajessheimmobilienvivianvivoryvixn--45br5cylvlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavmintsorocabalashovhachiojiyahikobierzycevologdanskoninjambylvolvolkswagencyouvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiechungnamdalseidfjordynathomebuiltwithdarkhangelskypecorittogojomeetoystre-slidrettozawawmemergencyahabackdropalermochizukikirarahkkeravjuwmflabsvalbardunloppadualstackomvuxn--3hcrj9chonanbuskerudynamisches-dnsarpsborgripeeweeklylotterywoodsidellogliastradingworse-thanhphohochiminhadselbuyshouseshirakolobrzegersundongthapmircloudletshiranukamishihorowowloclawekonskowolawawpdevcloudwpenginepoweredwphostedmailwpmucdnipropetrovskygearappodlasiellaknoluoktagajobojis-an-entertainerwpmudevcdnaccessojamparaglidingwritesthisblogoipodzonewroclawmcloudwsseoullensvanguardianwtcp4wtfastlylbanzaicloudappspotagereporthruherecreationinomiyakonojorpelandigickarasjohkameyamatotakadawuozuerichardlillywzmiuwajimaxn--4it797konsulatrobeepsondriobranconagareyamaizuruhrxn--4pvxs4allxn--54b7fta0ccistrondheimpertrixcdn77-secureadymadealstahaugesunderxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49citadelhichisochimkentozsdell-ogliastraderxn--5rtq34kontuminamiuonumatsunoxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264citicarrdrobakamaiorigin-stagingmxn--12co0c3b4evalleaostaobaomoriguchiharaffleentrycloudflare-ipfstcgroupaaskimitsubatamibulsan-suedtirolkuszczytnoopscbgrimstadrrxn--80aaa0cvacationsvchoyodobashichinohealth-carereforminamidaitomanaustdalxn--80adxhksveioxn--80ao21axn--80aqecdr1axn--80asehdbarclaycards3-us-west-1xn--80aswgxn--80aukraanghkeliwebpaaskoyabeagleboardxn--8dbq2axn--8ltr6
2konyvelohmusashimurayamassivegridxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisencowayxn--90a3academiamicable-modemoneyxn--90aeroportsinfolionetworkangerxn--90aishobaraxn--90amckinseyxn--90azhytomyrxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawaxn--asky-iraxn--aurskog-hland-jnbarclays3-us-west-2xn--avery-yuasakurastoragexn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsvelvikongsbergxn--bck1b9a5dre4civilaviationfabricafederation-webredirectmediatechnologyeongbukashiwazakiyosembokutamamuraxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptarumizusawaxn--blt-elabcienciamallamaceiobbcn-north-1xn--bmlo-graingerxn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagesquare7xn--brum-voagatrustkanazawaxn--btsfjord-9zaxn--bulsan-sdtirol-nsbarefootballooningjovikarasjoketokashikiyokawaraxn--c1avgxn--c2br7gxn--c3s14misakis-a-therapistoiaxn--cck2b3baremetalombardyn-vpndns3-website-ap-northeast-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-into-cartoonsokamitsuexn--ciqpnxn--clchc0ea0b2g2a9gcdxn--czr694bargainstantcloudfrontdoorestauranthuathienhuebinordre-landiherokuapparochernigovernmentjeldsundiscordsays3-website-ap-southeast-1xn--czrs0trvaroyxn--czru2dxn--czrw28barrel-of-knowledgeapplinziitatebayashijonawatebizenakanojoetsumomodellinglassnillfjordiscordsezgoraxn--d1acj3barrell-of-knowledgecomputermezproxyzgorzeleccoffeedbackanagawarmiastalowa-wolayangroupars3-website-ap-southeast-2xn--d1alfaststacksevenassigdalxn--d1atrysiljanxn--d5qv7z876clanbibaiduckdnsaseboknowsitallxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koobindalxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmail-boxaxn--eckvdtc9dxn--efvn9svn-repostuff-4-salexn--efvy88haebaruericssongdalenviknaklodzkochikushinonsenasakuchinotsuchiurakawaxn--ehqz56nxn--elqq16hagakhanhhoabinhduongxn--eve
ni-0qa01gaxn--f6qx53axn--fct429kooris-a-nascarfanxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbcleverappsassarinuyamashinazawaxn--fiq64barsycenterprisecloudcontrolappgafanquangnamasteigenoamishirasatochigifts3-website-eu-west-1xn--fiqs8swidnicaravanylvenetogakushimotoganexn--fiqz9swidnikitagatakkomaganexn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbsswiebodzindependent-commissionxn--forlcesena-c8axn--fpcrj9c3dxn--frde-granexn--frna-woaxn--frya-hraxn--fzc2c9e2clickrisinglesjaguarxn--fzys8d69uvgmailxn--g2xx48clinicasacampinagrandebungotakadaemongolianishitosashimizunaminamiawajikintuitoyotsukaidownloadrudtvsaogoncapooguyxn--gckr3f0fastvps-serveronakanotoddenxn--gecrj9cliniquedaklakasamatsudoesntexisteingeekasserversicherungroks-theatrentin-sud-tirolxn--ggaviika-8ya47hagebostadxn--gildeskl-g0axn--givuotna-8yandexcloudxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-into-gamessinamsosnowieconomiasadojin-dslattuminamitanexn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45brj9churcharterxn--gnstigliefern-wobihirosakikamijimayfirstorfjordxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3eveneswinoujsciencexn--h2brj9c8clothingdustdatadetectrani-andria-barletta-trani-andriaxn--h3cuzk1dienbienxn--hbmer-xqaxn--hcesuolo-7ya35barsyonlinehimejiiyamanouchikujoinvilleirvikarasuyamashikemrevistathellequipmentjmaxxxjavald-aostatics3-website-sa-east-1xn--hebda8basicserversejny-2xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-k3swisstufftoread-booksnestudioxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyaotsusonoxn--io0a7is-leetrentinoaltoadigexn--j1adpohlxn--j1aefauskedsmokorsetagayaseralingenovaraxn--j1ael8basilicataniaxn--j1amhaibarakisosakitahatakamatsukawaxn--j6w193gxn--jlq480n2rgxn--jlster-byasakaiminatoyookananiimiharuxn--jrpeland-54axn--jvr189misasaguris-an-accountantsmolaquilaocais-a-linux-useranishiaritabashikaoizumizakitashiobaraxn--k7yn95exn--k
army-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45q11circlerkstagentsasayamaxn--koluokta-7ya57haiduongxn--kprw13dxn--kpry57dxn--kput3is-lostre-toteneis-a-llamarumorimachidaxn--krager-gyasugitlabbvieeexn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfastly-terrariuminamiiseharaxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasuokanmakiwakuratexn--kvnangen-k0axn--l-1fairwindsynology-diskstationxn--l1accentureklamborghinikkofuefukihabororosynology-dsuzakadnsaliastudynaliastrynxn--laheadju-7yatominamibosoftwarendalenugxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52basketballfinanzjaworznoticeableksvikaratsuginamikatagamilanotogawaxn--lesund-huaxn--lgbbat1ad8jejuxn--lgrd-poacctulaspeziaxn--lhppi-xqaxn--linds-pramericanexpresservegame-serverxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacn-northwest-1xn--lten-granvindafjordxn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesxn--mgb9awbfbsbxn--1qqw23axn--mgba3a3ejtunesuzukamogawaxn--mgba3a4f16axn--mgba3a4fra1-deloittexn--mgba7c0bbn0axn--mgbaakc7dvfsxn--mgbaam7a8haiphongonnakatsugawaxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscountry-snowplowiczeladzlgleezeu-2xn--mgbai9azgqp6jelasticbeanstalkharkovalleeaostexn--mgbayh7gparasitexn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskopervikhmelnytskyivalleedaostexn--mgbqly7c0a67fbcngroks-thisayamanobeatsaudaxn--mgbqly7cvafricargoboavistanbulsan-sudtirolxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhauspostman-echofunatoriginstances3-website-us-east-1xn--mgbx4cd0abkhaziaxn--mix082fbx-osewienxn--mix891fbxosexyxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44cnpyatigorskjervoyagexn--mkru45is-not-certifiedxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakuratanxn--mosjen-eyatsukannamihokksundxn--mot-tlavangenxn--mre-og-romsdal-qqbuservecounterstrikexn--msy-ula0ha
ir-surveillancexn--mtta-vrjjat-k7aflakstadaokayamazonaws-cloud9guacuiababybluebiteckidsmynasushiobaracingrok-freeddnsfreebox-osascoli-picenogatabuseating-organicbcgjerdrumcprequalifymelbourneasypanelblagrarq-authgear-stagingjerstadeltaishinomakilovecollegefantasyleaguenoharauthgearappspacehosted-by-previderehabmereitattoolforgerockyombolzano-altoadigeorgeorgiauthordalandroideporteatonamidorivnebetsukubankanumazuryomitanocparmautocodebergamoarekembuchikumagayagawafflecelloisirs3-external-180reggioemiliaromagnarusawaustrheimbalsan-sudtirolivingitpagexlivornobserveregruhostingivestbyglandroverhalladeskjakamaiedge-stagingivingjemnes3-eu-west-2038xn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4dbgdty6ciscofreakamaihd-stagingriwataraindroppdalxn--nit225koryokamikawanehonbetsuwanouchikuhokuryugasakis-a-nursellsyourhomeftpiwatexn--nmesjevuemie-tcbalatinord-frontierxn--nnx388axn--nodessakurawebsozais-savedxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsicilynxn--4dbrk0cexn--o3cw4hakatanortonkotsunndalxn--o3cyx2axn--od0algardxn--od0aq3beneventodayusuharaxn--ogbpf8fldrvelvetromsohuissier-justicexn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--otu796dxn--p1acfedjeezxn--p1ais-slickharkivallee-d-aostexn--pgbs0dhlx3xn--porsgu-sta26fedorainfraclouderaxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cnsauheradyndns-at-homedepotenzamamicrosoftbankasukabedzin-brbalsfjordietgoryoshiokanravocats3-fips-us-gov-west-1xn--qcka1pmcpenzapposxn--qqqt11misconfusedxn--qxa6axn--qxamunexus-3xn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-uberleetrentinos-tirolxn--rennesy-v1axn--rhkkervju-01afedorapeoplefrakkestadyndns-webhostingujogaszxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturalxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawaraxn--rny31hakodatexn--rovu88bentleyusuitatamotorsitestinglitchernihivgubs3-website-us-west-1xn--rros-graphicsxn--rsko
g-uuaxn--rst-0naturbruksgymnxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawatahamaxn--s-1faitheshopwarezzoxn--s9brj9cntraniandriabarlettatraniandriaxn--sandnessjen-ogbentrendhostingliwiceu-3xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4gbriminiserverxn--skierv-utazurestaticappspaceusercontentunkongsvingerxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navigationxn--slt-elabogadobeaemcloud-fr1xn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbeppublishproxyuufcfanirasakindependent-panelomonza-brianzaporizhzhedmarkarelianceu-4xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeskidyn-ip24xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bloggerxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbestbuyshoparenagasakikuchikuseihicampinashikiminohostfoldnavyuzawaxn--stre-toten-zcbetainaboxfuselfipartindependent-reviewegroweibolognagasukeu-north-1xn--t60b56axn--tckweddingxn--tiq49xqyjelenia-goraxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbhzc66xn--trentin-sdtirol-7vbialystokkeymachineu-south-1xn--trentino-sd-tirol-c3bielawakuyachimataharanzanishiazaindielddanuorrindigenamerikawauevje-og-hornnes3-website-us-west-2xn--trentino-sdtirol-szbiella-speziaxn--trentinosd-tirol-rzbieszczadygeyachiyodaeguamfamscompute-1xn--trentinosdtirol-7vbievat-band-campaignieznoorstaplesakyotanabellunordeste-idclkarlsoyxn--trentinsd-tirol-6vbifukagawalbrzycharitydalomzaporizhzhiaxn--trentinsdtirol-nsbigv-infolkebiblegnicalvinklein-butterhcloudiscoursesalangenishigotpantheonsitexn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atventuresinstagingxn--uc0ay4axn--uist22hakonexn--uisz3gxn--unjrga-rtashkenturindalxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbturystykaneyamazoexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccertmgreaterxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatu
ng-pwbiharstadotsubetsugarulezajskiervaksdalondonetskarmoyxn--vestvgy-ixa6oxn--vg-yiabruzzombieidskogasawarackmazerbaijan-mayenbaidarmeniaxn--vgan-qoaxn--vgsy-qoa0jellybeanxn--vgu402coguchikuzenishiwakinvestmentsaveincloudyndns-at-workisboringsakershusrcfdyndns-blogsitexn--vhquvestfoldxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bihoronobeokagakikugawalesundiscoverdalondrinaplesknsalon-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1communexn--wgbl6axn--xhq521bikedaejeonbuk0xn--xkc2al3hye2axn--xkc2dl3a5ee0hakubackyardshiraois-a-greenxn--y9a3aquarelleasingxn--yer-znavois-very-badxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4it168dxn--ystre-slidre-ujbiofficialorenskoglobodoes-itcouldbeworldishangrilamdongnairkitapps-audibleasecuritytacticsxn--0trq7p7nnishiharaxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bipartsaloonishiizunazukindustriaxnbayernxz
\ No newline at end of file
diff --git a/publicsuffix/example_test.go b/publicsuffix/example_test.go
index 3f44dcfe7..c051dac8e 100644
--- a/publicsuffix/example_test.go
+++ b/publicsuffix/example_test.go
@@ -77,7 +77,7 @@ func ExamplePublicSuffix_manager() {
// > golang.dev dev is ICANN Managed
// > golang.net net is ICANN Managed
// > play.golang.org org is ICANN Managed
- // > gophers.in.space.museum space.museum is ICANN Managed
+ // > gophers.in.space.museum museum is ICANN Managed
// >
// > 0emm.com com is ICANN Managed
// > a.0emm.com a.0emm.com is Privately Managed
diff --git a/publicsuffix/gen.go b/publicsuffix/gen.go
index 2ad0abdc1..21c191415 100644
--- a/publicsuffix/gen.go
+++ b/publicsuffix/gen.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build ignore
-// +build ignore
package main
diff --git a/publicsuffix/table.go b/publicsuffix/table.go
index 6bdadcc44..78d400fa6 100644
--- a/publicsuffix/table.go
+++ b/publicsuffix/table.go
@@ -4,7 +4,7 @@ package publicsuffix
import _ "embed"
-const version = "publicsuffix.org's public_suffix_list.dat, git revision e248cbc92a527a166454afe9914c4c1b4253893f (2022-11-15T18:02:38Z)"
+const version = "publicsuffix.org's public_suffix_list.dat, git revision 63cbc63d470d7b52c35266aa96c4c98c96ec499c (2023-08-03T10:01:25Z)"
const (
nodesBits = 40
@@ -26,7 +26,7 @@ const (
)
// numTLD is the number of top level domains.
-const numTLD = 1494
+const numTLD = 1474
// text is the combined text of all labels.
//
@@ -63,8 +63,8 @@ var nodes uint40String
//go:embed data/children
var children uint32String
-// max children 718 (capacity 1023)
-// max text offset 32976 (capacity 65535)
-// max text length 36 (capacity 63)
-// max hi 9656 (capacity 16383)
-// max lo 9651 (capacity 16383)
+// max children 743 (capacity 1023)
+// max text offset 30876 (capacity 65535)
+// max text length 31 (capacity 63)
+// max hi 9322 (capacity 16383)
+// max lo 9317 (capacity 16383)
diff --git a/publicsuffix/table_test.go b/publicsuffix/table_test.go
index 99698271a..a297b3b0d 100644
--- a/publicsuffix/table_test.go
+++ b/publicsuffix/table_test.go
@@ -2,7 +2,7 @@
package publicsuffix
-const numICANNRules = 7367
+const numICANNRules = 6893
var rules = [...]string{
"ac",
@@ -302,9 +302,26 @@ var rules = [...]string{
"org.bi",
"biz",
"bj",
- "asso.bj",
- "barreau.bj",
- "gouv.bj",
+ "africa.bj",
+ "agro.bj",
+ "architectes.bj",
+ "assur.bj",
+ "avocats.bj",
+ "co.bj",
+ "com.bj",
+ "eco.bj",
+ "econo.bj",
+ "edu.bj",
+ "info.bj",
+ "loisirs.bj",
+ "money.bj",
+ "net.bj",
+ "org.bj",
+ "ote.bj",
+ "resto.bj",
+ "restaurant.bj",
+ "tourism.bj",
+ "univ.bj",
"bm",
"com.bm",
"edu.bm",
@@ -3596,552 +3613,6 @@ var rules = [...]string{
"co.mu",
"or.mu",
"museum",
- "academy.museum",
- "agriculture.museum",
- "air.museum",
- "airguard.museum",
- "alabama.museum",
- "alaska.museum",
- "amber.museum",
- "ambulance.museum",
- "american.museum",
- "americana.museum",
- "americanantiques.museum",
- "americanart.museum",
- "amsterdam.museum",
- "and.museum",
- "annefrank.museum",
- "anthro.museum",
- "anthropology.museum",
- "antiques.museum",
- "aquarium.museum",
- "arboretum.museum",
- "archaeological.museum",
- "archaeology.museum",
- "architecture.museum",
- "art.museum",
- "artanddesign.museum",
- "artcenter.museum",
- "artdeco.museum",
- "arteducation.museum",
- "artgallery.museum",
- "arts.museum",
- "artsandcrafts.museum",
- "asmatart.museum",
- "assassination.museum",
- "assisi.museum",
- "association.museum",
- "astronomy.museum",
- "atlanta.museum",
- "austin.museum",
- "australia.museum",
- "automotive.museum",
- "aviation.museum",
- "axis.museum",
- "badajoz.museum",
- "baghdad.museum",
- "bahn.museum",
- "bale.museum",
- "baltimore.museum",
- "barcelona.museum",
- "baseball.museum",
- "basel.museum",
- "baths.museum",
- "bauern.museum",
- "beauxarts.museum",
- "beeldengeluid.museum",
- "bellevue.museum",
- "bergbau.museum",
- "berkeley.museum",
- "berlin.museum",
- "bern.museum",
- "bible.museum",
- "bilbao.museum",
- "bill.museum",
- "birdart.museum",
- "birthplace.museum",
- "bonn.museum",
- "boston.museum",
- "botanical.museum",
- "botanicalgarden.museum",
- "botanicgarden.museum",
- "botany.museum",
- "brandywinevalley.museum",
- "brasil.museum",
- "bristol.museum",
- "british.museum",
- "britishcolumbia.museum",
- "broadcast.museum",
- "brunel.museum",
- "brussel.museum",
- "brussels.museum",
- "bruxelles.museum",
- "building.museum",
- "burghof.museum",
- "bus.museum",
- "bushey.museum",
- "cadaques.museum",
- "california.museum",
- "cambridge.museum",
- "can.museum",
- "canada.museum",
- "capebreton.museum",
- "carrier.museum",
- "cartoonart.museum",
- "casadelamoneda.museum",
- "castle.museum",
- "castres.museum",
- "celtic.museum",
- "center.museum",
- "chattanooga.museum",
- "cheltenham.museum",
- "chesapeakebay.museum",
- "chicago.museum",
- "children.museum",
- "childrens.museum",
- "childrensgarden.museum",
- "chiropractic.museum",
- "chocolate.museum",
- "christiansburg.museum",
- "cincinnati.museum",
- "cinema.museum",
- "circus.museum",
- "civilisation.museum",
- "civilization.museum",
- "civilwar.museum",
- "clinton.museum",
- "clock.museum",
- "coal.museum",
- "coastaldefence.museum",
- "cody.museum",
- "coldwar.museum",
- "collection.museum",
- "colonialwilliamsburg.museum",
- "coloradoplateau.museum",
- "columbia.museum",
- "columbus.museum",
- "communication.museum",
- "communications.museum",
- "community.museum",
- "computer.museum",
- "computerhistory.museum",
- "xn--comunicaes-v6a2o.museum",
- "contemporary.museum",
- "contemporaryart.museum",
- "convent.museum",
- "copenhagen.museum",
- "corporation.museum",
- "xn--correios-e-telecomunicaes-ghc29a.museum",
- "corvette.museum",
- "costume.museum",
- "countryestate.museum",
- "county.museum",
- "crafts.museum",
- "cranbrook.museum",
- "creation.museum",
- "cultural.museum",
- "culturalcenter.museum",
- "culture.museum",
- "cyber.museum",
- "cymru.museum",
- "dali.museum",
- "dallas.museum",
- "database.museum",
- "ddr.museum",
- "decorativearts.museum",
- "delaware.museum",
- "delmenhorst.museum",
- "denmark.museum",
- "depot.museum",
- "design.museum",
- "detroit.museum",
- "dinosaur.museum",
- "discovery.museum",
- "dolls.museum",
- "donostia.museum",
- "durham.museum",
- "eastafrica.museum",
- "eastcoast.museum",
- "education.museum",
- "educational.museum",
- "egyptian.museum",
- "eisenbahn.museum",
- "elburg.museum",
- "elvendrell.museum",
- "embroidery.museum",
- "encyclopedic.museum",
- "england.museum",
- "entomology.museum",
- "environment.museum",
- "environmentalconservation.museum",
- "epilepsy.museum",
- "essex.museum",
- "estate.museum",
- "ethnology.museum",
- "exeter.museum",
- "exhibition.museum",
- "family.museum",
- "farm.museum",
- "farmequipment.museum",
- "farmers.museum",
- "farmstead.museum",
- "field.museum",
- "figueres.museum",
- "filatelia.museum",
- "film.museum",
- "fineart.museum",
- "finearts.museum",
- "finland.museum",
- "flanders.museum",
- "florida.museum",
- "force.museum",
- "fortmissoula.museum",
- "fortworth.museum",
- "foundation.museum",
- "francaise.museum",
- "frankfurt.museum",
- "franziskaner.museum",
- "freemasonry.museum",
- "freiburg.museum",
- "fribourg.museum",
- "frog.museum",
- "fundacio.museum",
- "furniture.museum",
- "gallery.museum",
- "garden.museum",
- "gateway.museum",
- "geelvinck.museum",
- "gemological.museum",
- "geology.museum",
- "georgia.museum",
- "giessen.museum",
- "glas.museum",
- "glass.museum",
- "gorge.museum",
- "grandrapids.museum",
- "graz.museum",
- "guernsey.museum",
- "halloffame.museum",
- "hamburg.museum",
- "handson.museum",
- "harvestcelebration.museum",
- "hawaii.museum",
- "health.museum",
- "heimatunduhren.museum",
- "hellas.museum",
- "helsinki.museum",
- "hembygdsforbund.museum",
- "heritage.museum",
- "histoire.museum",
- "historical.museum",
- "historicalsociety.museum",
- "historichouses.museum",
- "historisch.museum",
- "historisches.museum",
- "history.museum",
- "historyofscience.museum",
- "horology.museum",
- "house.museum",
- "humanities.museum",
- "illustration.museum",
- "imageandsound.museum",
- "indian.museum",
- "indiana.museum",
- "indianapolis.museum",
- "indianmarket.museum",
- "intelligence.museum",
- "interactive.museum",
- "iraq.museum",
- "iron.museum",
- "isleofman.museum",
- "jamison.museum",
- "jefferson.museum",
- "jerusalem.museum",
- "jewelry.museum",
- "jewish.museum",
- "jewishart.museum",
- "jfk.museum",
- "journalism.museum",
- "judaica.museum",
- "judygarland.museum",
- "juedisches.museum",
- "juif.museum",
- "karate.museum",
- "karikatur.museum",
- "kids.museum",
- "koebenhavn.museum",
- "koeln.museum",
- "kunst.museum",
- "kunstsammlung.museum",
- "kunstunddesign.museum",
- "labor.museum",
- "labour.museum",
- "lajolla.museum",
- "lancashire.museum",
- "landes.museum",
- "lans.museum",
- "xn--lns-qla.museum",
- "larsson.museum",
- "lewismiller.museum",
- "lincoln.museum",
- "linz.museum",
- "living.museum",
- "livinghistory.museum",
- "localhistory.museum",
- "london.museum",
- "losangeles.museum",
- "louvre.museum",
- "loyalist.museum",
- "lucerne.museum",
- "luxembourg.museum",
- "luzern.museum",
- "mad.museum",
- "madrid.museum",
- "mallorca.museum",
- "manchester.museum",
- "mansion.museum",
- "mansions.museum",
- "manx.museum",
- "marburg.museum",
- "maritime.museum",
- "maritimo.museum",
- "maryland.museum",
- "marylhurst.museum",
- "media.museum",
- "medical.museum",
- "medizinhistorisches.museum",
- "meeres.museum",
- "memorial.museum",
- "mesaverde.museum",
- "michigan.museum",
- "midatlantic.museum",
- "military.museum",
- "mill.museum",
- "miners.museum",
- "mining.museum",
- "minnesota.museum",
- "missile.museum",
- "missoula.museum",
- "modern.museum",
- "moma.museum",
- "money.museum",
- "monmouth.museum",
- "monticello.museum",
- "montreal.museum",
- "moscow.museum",
- "motorcycle.museum",
- "muenchen.museum",
- "muenster.museum",
- "mulhouse.museum",
- "muncie.museum",
- "museet.museum",
- "museumcenter.museum",
- "museumvereniging.museum",
- "music.museum",
- "national.museum",
- "nationalfirearms.museum",
- "nationalheritage.museum",
- "nativeamerican.museum",
- "naturalhistory.museum",
- "naturalhistorymuseum.museum",
- "naturalsciences.museum",
- "nature.museum",
- "naturhistorisches.museum",
- "natuurwetenschappen.museum",
- "naumburg.museum",
- "naval.museum",
- "nebraska.museum",
- "neues.museum",
- "newhampshire.museum",
- "newjersey.museum",
- "newmexico.museum",
- "newport.museum",
- "newspaper.museum",
- "newyork.museum",
- "niepce.museum",
- "norfolk.museum",
- "north.museum",
- "nrw.museum",
- "nyc.museum",
- "nyny.museum",
- "oceanographic.museum",
- "oceanographique.museum",
- "omaha.museum",
- "online.museum",
- "ontario.museum",
- "openair.museum",
- "oregon.museum",
- "oregontrail.museum",
- "otago.museum",
- "oxford.museum",
- "pacific.museum",
- "paderborn.museum",
- "palace.museum",
- "paleo.museum",
- "palmsprings.museum",
- "panama.museum",
- "paris.museum",
- "pasadena.museum",
- "pharmacy.museum",
- "philadelphia.museum",
- "philadelphiaarea.museum",
- "philately.museum",
- "phoenix.museum",
- "photography.museum",
- "pilots.museum",
- "pittsburgh.museum",
- "planetarium.museum",
- "plantation.museum",
- "plants.museum",
- "plaza.museum",
- "portal.museum",
- "portland.museum",
- "portlligat.museum",
- "posts-and-telecommunications.museum",
- "preservation.museum",
- "presidio.museum",
- "press.museum",
- "project.museum",
- "public.museum",
- "pubol.museum",
- "quebec.museum",
- "railroad.museum",
- "railway.museum",
- "research.museum",
- "resistance.museum",
- "riodejaneiro.museum",
- "rochester.museum",
- "rockart.museum",
- "roma.museum",
- "russia.museum",
- "saintlouis.museum",
- "salem.museum",
- "salvadordali.museum",
- "salzburg.museum",
- "sandiego.museum",
- "sanfrancisco.museum",
- "santabarbara.museum",
- "santacruz.museum",
- "santafe.museum",
- "saskatchewan.museum",
- "satx.museum",
- "savannahga.museum",
- "schlesisches.museum",
- "schoenbrunn.museum",
- "schokoladen.museum",
- "school.museum",
- "schweiz.museum",
- "science.museum",
- "scienceandhistory.museum",
- "scienceandindustry.museum",
- "sciencecenter.museum",
- "sciencecenters.museum",
- "science-fiction.museum",
- "sciencehistory.museum",
- "sciences.museum",
- "sciencesnaturelles.museum",
- "scotland.museum",
- "seaport.museum",
- "settlement.museum",
- "settlers.museum",
- "shell.museum",
- "sherbrooke.museum",
- "sibenik.museum",
- "silk.museum",
- "ski.museum",
- "skole.museum",
- "society.museum",
- "sologne.museum",
- "soundandvision.museum",
- "southcarolina.museum",
- "southwest.museum",
- "space.museum",
- "spy.museum",
- "square.museum",
- "stadt.museum",
- "stalbans.museum",
- "starnberg.museum",
- "state.museum",
- "stateofdelaware.museum",
- "station.museum",
- "steam.museum",
- "steiermark.museum",
- "stjohn.museum",
- "stockholm.museum",
- "stpetersburg.museum",
- "stuttgart.museum",
- "suisse.museum",
- "surgeonshall.museum",
- "surrey.museum",
- "svizzera.museum",
- "sweden.museum",
- "sydney.museum",
- "tank.museum",
- "tcm.museum",
- "technology.museum",
- "telekommunikation.museum",
- "television.museum",
- "texas.museum",
- "textile.museum",
- "theater.museum",
- "time.museum",
- "timekeeping.museum",
- "topology.museum",
- "torino.museum",
- "touch.museum",
- "town.museum",
- "transport.museum",
- "tree.museum",
- "trolley.museum",
- "trust.museum",
- "trustee.museum",
- "uhren.museum",
- "ulm.museum",
- "undersea.museum",
- "university.museum",
- "usa.museum",
- "usantiques.museum",
- "usarts.museum",
- "uscountryestate.museum",
- "usculture.museum",
- "usdecorativearts.museum",
- "usgarden.museum",
- "ushistory.museum",
- "ushuaia.museum",
- "uslivinghistory.museum",
- "utah.museum",
- "uvic.museum",
- "valley.museum",
- "vantaa.museum",
- "versailles.museum",
- "viking.museum",
- "village.museum",
- "virginia.museum",
- "virtual.museum",
- "virtuel.museum",
- "vlaanderen.museum",
- "volkenkunde.museum",
- "wales.museum",
- "wallonie.museum",
- "war.museum",
- "washingtondc.museum",
- "watchandclock.museum",
- "watch-and-clock.museum",
- "western.museum",
- "westfalen.museum",
- "whaling.museum",
- "wildlife.museum",
- "williamsburg.museum",
- "windmill.museum",
- "workshop.museum",
- "york.museum",
- "yorkshire.museum",
- "yosemite.museum",
- "youth.museum",
- "zoological.museum",
- "zoology.museum",
- "xn--9dbhblg6di.museum",
- "xn--h1aegh.museum",
"mv",
"aero.mv",
"biz.mv",
@@ -5133,52 +4604,60 @@ var rules = [...]string{
"turystyka.pl",
"gov.pl",
"ap.gov.pl",
+ "griw.gov.pl",
"ic.gov.pl",
"is.gov.pl",
- "us.gov.pl",
"kmpsp.gov.pl",
+ "konsulat.gov.pl",
"kppsp.gov.pl",
- "kwpsp.gov.pl",
- "psp.gov.pl",
- "wskr.gov.pl",
"kwp.gov.pl",
+ "kwpsp.gov.pl",
+ "mup.gov.pl",
"mw.gov.pl",
- "ug.gov.pl",
- "um.gov.pl",
- "umig.gov.pl",
- "ugim.gov.pl",
- "upow.gov.pl",
- "uw.gov.pl",
- "starostwo.gov.pl",
+ "oia.gov.pl",
+ "oirm.gov.pl",
+ "oke.gov.pl",
+ "oow.gov.pl",
+ "oschr.gov.pl",
+ "oum.gov.pl",
"pa.gov.pl",
+ "pinb.gov.pl",
+ "piw.gov.pl",
"po.gov.pl",
+ "pr.gov.pl",
+ "psp.gov.pl",
"psse.gov.pl",
"pup.gov.pl",
"rzgw.gov.pl",
"sa.gov.pl",
+ "sdn.gov.pl",
+ "sko.gov.pl",
"so.gov.pl",
"sr.gov.pl",
- "wsa.gov.pl",
- "sko.gov.pl",
+ "starostwo.gov.pl",
+ "ug.gov.pl",
+ "ugim.gov.pl",
+ "um.gov.pl",
+ "umig.gov.pl",
+ "upow.gov.pl",
+ "uppo.gov.pl",
+ "us.gov.pl",
+ "uw.gov.pl",
"uzs.gov.pl",
+ "wif.gov.pl",
"wiih.gov.pl",
"winb.gov.pl",
- "pinb.gov.pl",
"wios.gov.pl",
"witd.gov.pl",
- "wzmiuw.gov.pl",
- "piw.gov.pl",
"wiw.gov.pl",
- "griw.gov.pl",
- "wif.gov.pl",
- "oum.gov.pl",
- "sdn.gov.pl",
- "zp.gov.pl",
- "uppo.gov.pl",
- "mup.gov.pl",
+ "wkz.gov.pl",
+ "wsa.gov.pl",
+ "wskr.gov.pl",
+ "wsse.gov.pl",
"wuoz.gov.pl",
- "konsulat.gov.pl",
- "oirm.gov.pl",
+ "wzmiuw.gov.pl",
+ "zp.gov.pl",
+ "zpisdn.gov.pl",
"augustow.pl",
"babia-gora.pl",
"bedzin.pl",
@@ -5722,6 +5201,7 @@ var rules = [...]string{
"kirovograd.ua",
"km.ua",
"kr.ua",
+ "kropyvnytskyi.ua",
"krym.ua",
"ks.ua",
"kv.ua",
@@ -6063,18 +5543,84 @@ var rules = [...]string{
"net.vi",
"org.vi",
"vn",
+ "ac.vn",
+ "ai.vn",
+ "biz.vn",
"com.vn",
- "net.vn",
- "org.vn",
"edu.vn",
"gov.vn",
- "int.vn",
- "ac.vn",
- "biz.vn",
+ "health.vn",
+ "id.vn",
"info.vn",
+ "int.vn",
+ "io.vn",
"name.vn",
+ "net.vn",
+ "org.vn",
"pro.vn",
- "health.vn",
+ "angiang.vn",
+ "bacgiang.vn",
+ "backan.vn",
+ "baclieu.vn",
+ "bacninh.vn",
+ "baria-vungtau.vn",
+ "bentre.vn",
+ "binhdinh.vn",
+ "binhduong.vn",
+ "binhphuoc.vn",
+ "binhthuan.vn",
+ "camau.vn",
+ "cantho.vn",
+ "caobang.vn",
+ "daklak.vn",
+ "daknong.vn",
+ "danang.vn",
+ "dienbien.vn",
+ "dongnai.vn",
+ "dongthap.vn",
+ "gialai.vn",
+ "hagiang.vn",
+ "haiduong.vn",
+ "haiphong.vn",
+ "hanam.vn",
+ "hanoi.vn",
+ "hatinh.vn",
+ "haugiang.vn",
+ "hoabinh.vn",
+ "hungyen.vn",
+ "khanhhoa.vn",
+ "kiengiang.vn",
+ "kontum.vn",
+ "laichau.vn",
+ "lamdong.vn",
+ "langson.vn",
+ "laocai.vn",
+ "longan.vn",
+ "namdinh.vn",
+ "nghean.vn",
+ "ninhbinh.vn",
+ "ninhthuan.vn",
+ "phutho.vn",
+ "phuyen.vn",
+ "quangbinh.vn",
+ "quangnam.vn",
+ "quangngai.vn",
+ "quangninh.vn",
+ "quangtri.vn",
+ "soctrang.vn",
+ "sonla.vn",
+ "tayninh.vn",
+ "thaibinh.vn",
+ "thainguyen.vn",
+ "thanhhoa.vn",
+ "thanhphohochiminh.vn",
+ "thuathienhue.vn",
+ "tiengiang.vn",
+ "travinh.vn",
+ "tuyenquang.vn",
+ "vinhlong.vn",
+ "vinhphuc.vn",
+ "yenbai.vn",
"vu",
"com.vu",
"edu.vu",
@@ -6221,7 +5767,6 @@ var rules = [...]string{
"org.zw",
"aaa",
"aarp",
- "abarth",
"abb",
"abbott",
"abbvie",
@@ -6235,7 +5780,6 @@ var rules = [...]string{
"accountants",
"aco",
"actor",
- "adac",
"ads",
"adult",
"aeg",
@@ -6249,7 +5793,6 @@ var rules = [...]string{
"airforce",
"airtel",
"akdn",
- "alfaromeo",
"alibaba",
"alipay",
"allfinanz",
@@ -6445,7 +5988,6 @@ var rules = [...]string{
"contact",
"contractors",
"cooking",
- "cookingchannel",
"cool",
"corsica",
"country",
@@ -6554,7 +6096,6 @@ var rules = [...]string{
"feedback",
"ferrari",
"ferrero",
- "fiat",
"fidelity",
"fido",
"film",
@@ -6576,7 +6117,6 @@ var rules = [...]string{
"fly",
"foo",
"food",
- "foodnetwork",
"football",
"ford",
"forex",
@@ -6661,7 +6201,6 @@ var rules = [...]string{
"helsinki",
"here",
"hermes",
- "hgtv",
"hiphop",
"hisamitsu",
"hitachi",
@@ -6680,7 +6219,6 @@ var rules = [...]string{
"host",
"hosting",
"hot",
- "hoteles",
"hotels",
"hotmail",
"house",
@@ -6761,7 +6299,6 @@ var rules = [...]string{
"lamborghini",
"lamer",
"lancaster",
- "lancia",
"land",
"landrover",
"lanxess",
@@ -6789,7 +6326,6 @@ var rules = [...]string{
"limited",
"limo",
"lincoln",
- "linde",
"link",
"lipsy",
"live",
@@ -6800,7 +6336,6 @@ var rules = [...]string{
"loans",
"locker",
"locus",
- "loft",
"lol",
"london",
"lotte",
@@ -6813,7 +6348,6 @@ var rules = [...]string{
"lundbeck",
"luxe",
"luxury",
- "macys",
"madrid",
"maif",
"maison",
@@ -6827,7 +6361,6 @@ var rules = [...]string{
"markets",
"marriott",
"marshalls",
- "maserati",
"mattel",
"mba",
"mckinsey",
@@ -6868,7 +6401,6 @@ var rules = [...]string{
"mtn",
"mtr",
"music",
- "mutual",
"nab",
"nagoya",
"natura",
@@ -6933,7 +6465,6 @@ var rules = [...]string{
"partners",
"parts",
"party",
- "passagens",
"pay",
"pccw",
"pet",
@@ -7063,7 +6594,6 @@ var rules = [...]string{
"select",
"sener",
"services",
- "ses",
"seven",
"sew",
"sex",
@@ -7157,7 +6687,6 @@ var rules = [...]string{
"tiaa",
"tickets",
"tienda",
- "tiffany",
"tips",
"tires",
"tirol",
@@ -7180,7 +6709,6 @@ var rules = [...]string{
"trading",
"training",
"travel",
- "travelchannel",
"travelers",
"travelersinsurance",
"trust",
@@ -7225,7 +6753,6 @@ var rules = [...]string{
"voting",
"voto",
"voyage",
- "vuelos",
"wales",
"walmart",
"walter",
@@ -7316,7 +6843,6 @@ var rules = [...]string{
"xn--io0a7i",
"xn--j1aef",
"xn--jlq480n2rg",
- "xn--jlq61u9w7b",
"xn--jvr189m",
"xn--kcrx77d1x4a",
"xn--kput3i",
@@ -7379,17 +6905,35 @@ var rules = [...]string{
"graphox.us",
"*.devcdnaccesso.com",
"*.on-acorn.io",
+ "activetrail.biz",
"adobeaemcloud.com",
"*.dev.adobeaemcloud.com",
"hlx.live",
"adobeaemcloud.net",
"hlx.page",
"hlx3.page",
+ "adobeio-static.net",
+ "adobeioruntime.net",
"beep.pl",
"airkitapps.com",
"airkitapps-au.com",
"airkitapps.eu",
"aivencloud.com",
+ "akadns.net",
+ "akamai.net",
+ "akamai-staging.net",
+ "akamaiedge.net",
+ "akamaiedge-staging.net",
+ "akamaihd.net",
+ "akamaihd-staging.net",
+ "akamaiorigin.net",
+ "akamaiorigin-staging.net",
+ "akamaized.net",
+ "akamaized-staging.net",
+ "edgekey.net",
+ "edgekey-staging.net",
+ "edgesuite.net",
+ "edgesuite-staging.net",
"barsy.ca",
"*.compute.estate",
"*.alces.network",
@@ -7456,46 +7000,72 @@ var rules = [...]string{
"s3.dualstack.us-east-2.amazonaws.com",
"s3.us-east-2.amazonaws.com",
"s3-website.us-east-2.amazonaws.com",
+ "analytics-gateway.ap-northeast-1.amazonaws.com",
+ "analytics-gateway.eu-west-1.amazonaws.com",
+ "analytics-gateway.us-east-1.amazonaws.com",
+ "analytics-gateway.us-east-2.amazonaws.com",
+ "analytics-gateway.us-west-2.amazonaws.com",
+ "webview-assets.aws-cloud9.af-south-1.amazonaws.com",
"vfs.cloud9.af-south-1.amazonaws.com",
"webview-assets.cloud9.af-south-1.amazonaws.com",
+ "webview-assets.aws-cloud9.ap-east-1.amazonaws.com",
"vfs.cloud9.ap-east-1.amazonaws.com",
"webview-assets.cloud9.ap-east-1.amazonaws.com",
+ "webview-assets.aws-cloud9.ap-northeast-1.amazonaws.com",
"vfs.cloud9.ap-northeast-1.amazonaws.com",
"webview-assets.cloud9.ap-northeast-1.amazonaws.com",
+ "webview-assets.aws-cloud9.ap-northeast-2.amazonaws.com",
"vfs.cloud9.ap-northeast-2.amazonaws.com",
"webview-assets.cloud9.ap-northeast-2.amazonaws.com",
+ "webview-assets.aws-cloud9.ap-northeast-3.amazonaws.com",
"vfs.cloud9.ap-northeast-3.amazonaws.com",
"webview-assets.cloud9.ap-northeast-3.amazonaws.com",
+ "webview-assets.aws-cloud9.ap-south-1.amazonaws.com",
"vfs.cloud9.ap-south-1.amazonaws.com",
"webview-assets.cloud9.ap-south-1.amazonaws.com",
+ "webview-assets.aws-cloud9.ap-southeast-1.amazonaws.com",
"vfs.cloud9.ap-southeast-1.amazonaws.com",
"webview-assets.cloud9.ap-southeast-1.amazonaws.com",
+ "webview-assets.aws-cloud9.ap-southeast-2.amazonaws.com",
"vfs.cloud9.ap-southeast-2.amazonaws.com",
"webview-assets.cloud9.ap-southeast-2.amazonaws.com",
+ "webview-assets.aws-cloud9.ca-central-1.amazonaws.com",
"vfs.cloud9.ca-central-1.amazonaws.com",
"webview-assets.cloud9.ca-central-1.amazonaws.com",
+ "webview-assets.aws-cloud9.eu-central-1.amazonaws.com",
"vfs.cloud9.eu-central-1.amazonaws.com",
"webview-assets.cloud9.eu-central-1.amazonaws.com",
+ "webview-assets.aws-cloud9.eu-north-1.amazonaws.com",
"vfs.cloud9.eu-north-1.amazonaws.com",
"webview-assets.cloud9.eu-north-1.amazonaws.com",
+ "webview-assets.aws-cloud9.eu-south-1.amazonaws.com",
"vfs.cloud9.eu-south-1.amazonaws.com",
"webview-assets.cloud9.eu-south-1.amazonaws.com",
+ "webview-assets.aws-cloud9.eu-west-1.amazonaws.com",
"vfs.cloud9.eu-west-1.amazonaws.com",
"webview-assets.cloud9.eu-west-1.amazonaws.com",
+ "webview-assets.aws-cloud9.eu-west-2.amazonaws.com",
"vfs.cloud9.eu-west-2.amazonaws.com",
"webview-assets.cloud9.eu-west-2.amazonaws.com",
+ "webview-assets.aws-cloud9.eu-west-3.amazonaws.com",
"vfs.cloud9.eu-west-3.amazonaws.com",
"webview-assets.cloud9.eu-west-3.amazonaws.com",
+ "webview-assets.aws-cloud9.me-south-1.amazonaws.com",
"vfs.cloud9.me-south-1.amazonaws.com",
"webview-assets.cloud9.me-south-1.amazonaws.com",
+ "webview-assets.aws-cloud9.sa-east-1.amazonaws.com",
"vfs.cloud9.sa-east-1.amazonaws.com",
"webview-assets.cloud9.sa-east-1.amazonaws.com",
+ "webview-assets.aws-cloud9.us-east-1.amazonaws.com",
"vfs.cloud9.us-east-1.amazonaws.com",
"webview-assets.cloud9.us-east-1.amazonaws.com",
+ "webview-assets.aws-cloud9.us-east-2.amazonaws.com",
"vfs.cloud9.us-east-2.amazonaws.com",
"webview-assets.cloud9.us-east-2.amazonaws.com",
+ "webview-assets.aws-cloud9.us-west-1.amazonaws.com",
"vfs.cloud9.us-west-1.amazonaws.com",
"webview-assets.cloud9.us-west-1.amazonaws.com",
+ "webview-assets.aws-cloud9.us-west-2.amazonaws.com",
"vfs.cloud9.us-west-2.amazonaws.com",
"webview-assets.cloud9.us-west-2.amazonaws.com",
"cn-north-1.eb.amazonaws.com.cn",
@@ -7542,6 +7112,7 @@ var rules = [...]string{
"myasustor.com",
"cdn.prod.atlassian-dev.net",
"translated.page",
+ "autocode.dev",
"myfritz.net",
"onavstack.net",
"*.awdev.ca",
@@ -7588,6 +7159,8 @@ var rules = [...]string{
"vm.bytemark.co.uk",
"cafjs.com",
"mycd.eu",
+ "canva-apps.cn",
+ "canva-apps.com",
"drr.ac",
"uwu.ai",
"carrd.co",
@@ -7653,8 +7226,11 @@ var rules = [...]string{
"cloudcontrolled.com",
"cloudcontrolapp.com",
"*.cloudera.site",
- "pages.dev",
+ "cf-ipfs.com",
+ "cloudflare-ipfs.com",
"trycloudflare.com",
+ "pages.dev",
+ "r2.dev",
"workers.dev",
"wnext.app",
"co.ca",
@@ -8227,6 +7803,7 @@ var rules = [...]string{
"channelsdvr.net",
"u.channelsdvr.net",
"edgecompute.app",
+ "fastly-edge.com",
"fastly-terrarium.com",
"fastlylb.net",
"map.fastlylb.net",
@@ -8566,6 +8143,7 @@ var rules = [...]string{
"ngo.ng",
"edu.scot",
"sch.so",
+ "ie.ua",
"hostyhosting.io",
"xn--hkkinen-5wa.fi",
"*.moonscale.io",
@@ -8633,7 +8211,6 @@ var rules = [...]string{
"iobb.net",
"mel.cloudlets.com.au",
"cloud.interhostsolutions.be",
- "users.scale.virtualcloud.com.br",
"mycloud.by",
"alp1.ae.flow.ch",
"appengine.flow.ch",
@@ -8657,9 +8234,7 @@ var rules = [...]string{
"de.trendhosting.cloud",
"jele.club",
"amscompute.com",
- "clicketcloud.com",
"dopaas.com",
- "hidora.com",
"paas.hosted-by-previder.com",
"rag-cloud.hosteur.com",
"rag-cloud-ch.hosteur.com",
@@ -8834,6 +8409,7 @@ var rules = [...]string{
"azurestaticapps.net",
"1.azurestaticapps.net",
"2.azurestaticapps.net",
+ "3.azurestaticapps.net",
"centralus.azurestaticapps.net",
"eastasia.azurestaticapps.net",
"eastus2.azurestaticapps.net",
@@ -8864,7 +8440,19 @@ var rules = [...]string{
"cloud.nospamproxy.com",
"netlify.app",
"4u.com",
+ "ngrok.app",
+ "ngrok-free.app",
+ "ngrok.dev",
+ "ngrok-free.dev",
"ngrok.io",
+ "ap.ngrok.io",
+ "au.ngrok.io",
+ "eu.ngrok.io",
+ "in.ngrok.io",
+ "jp.ngrok.io",
+ "sa.ngrok.io",
+ "us.ngrok.io",
+ "ngrok.pizza",
"nh-serv.co.uk",
"nfshost.com",
"*.developer.app",
@@ -9084,6 +8672,7 @@ var rules = [...]string{
"eu.pythonanywhere.com",
"qoto.io",
"qualifioapp.com",
+ "ladesk.com",
"qbuser.com",
"cloudsite.builders",
"instances.spawn.cc",
@@ -9132,6 +8721,53 @@ var rules = [...]string{
"xn--h1aliz.xn--p1acf",
"xn--90a1af.xn--p1acf",
"xn--41a.xn--p1acf",
+ "180r.com",
+ "dojin.com",
+ "sakuratan.com",
+ "sakuraweb.com",
+ "x0.com",
+ "2-d.jp",
+ "bona.jp",
+ "crap.jp",
+ "daynight.jp",
+ "eek.jp",
+ "flop.jp",
+ "halfmoon.jp",
+ "jeez.jp",
+ "matrix.jp",
+ "mimoza.jp",
+ "ivory.ne.jp",
+ "mail-box.ne.jp",
+ "mints.ne.jp",
+ "mokuren.ne.jp",
+ "opal.ne.jp",
+ "sakura.ne.jp",
+ "sumomo.ne.jp",
+ "topaz.ne.jp",
+ "netgamers.jp",
+ "nyanta.jp",
+ "o0o0.jp",
+ "rdy.jp",
+ "rgr.jp",
+ "rulez.jp",
+ "s3.isk01.sakurastorage.jp",
+ "s3.isk02.sakurastorage.jp",
+ "saloon.jp",
+ "sblo.jp",
+ "skr.jp",
+ "tank.jp",
+ "uh-oh.jp",
+ "undo.jp",
+ "rs.webaccel.jp",
+ "user.webaccel.jp",
+ "websozai.jp",
+ "xii.jp",
+ "squares.net",
+ "jpn.org",
+ "kirara.st",
+ "x0.to",
+ "from.tv",
+ "sakura.tv",
"*.builder.code.com",
"*.dev-builder.code.com",
"*.stg-builder.code.com",
@@ -9204,6 +8840,9 @@ var rules = [...]string{
"beta.bounty-full.com",
"small-web.org",
"vp4.me",
+ "snowflake.app",
+ "privatelink.snowflake.app",
+ "streamlit.app",
"streamlitapp.com",
"try-snowplow.com",
"srht.site",
@@ -9243,6 +8882,7 @@ var rules = [...]string{
"myspreadshop.se",
"myspreadshop.co.uk",
"api.stdlib.com",
+ "storipress.app",
"storj.farm",
"utwente.io",
"soc.srcf.net",
@@ -9272,6 +8912,8 @@ var rules = [...]string{
"vpnplus.to",
"direct.quickconnect.to",
"tabitorder.co.il",
+ "mytabit.co.il",
+ "mytabit.com",
"taifun-dns.de",
"beta.tailscale.net",
"ts.net",
@@ -9350,6 +8992,7 @@ var rules = [...]string{
"hk.org",
"ltd.hk",
"inc.hk",
+ "it.com",
"name.pm",
"sch.tf",
"biz.wf",
@@ -9472,7 +9115,6 @@ var rules = [...]string{
var nodeLabels = [...]string{
"aaa",
"aarp",
- "abarth",
"abb",
"abbott",
"abbvie",
@@ -9488,7 +9130,6 @@ var nodeLabels = [...]string{
"aco",
"actor",
"ad",
- "adac",
"ads",
"adult",
"ae",
@@ -9508,7 +9149,6 @@ var nodeLabels = [...]string{
"airtel",
"akdn",
"al",
- "alfaromeo",
"alibaba",
"alipay",
"allfinanz",
@@ -9750,7 +9390,6 @@ var nodeLabels = [...]string{
"contact",
"contractors",
"cooking",
- "cookingchannel",
"cool",
"coop",
"corsica",
@@ -9882,7 +9521,6 @@ var nodeLabels = [...]string{
"ferrari",
"ferrero",
"fi",
- "fiat",
"fidelity",
"fido",
"film",
@@ -9908,7 +9546,6 @@ var nodeLabels = [...]string{
"fo",
"foo",
"food",
- "foodnetwork",
"football",
"ford",
"forex",
@@ -10014,7 +9651,6 @@ var nodeLabels = [...]string{
"helsinki",
"here",
"hermes",
- "hgtv",
"hiphop",
"hisamitsu",
"hitachi",
@@ -10036,7 +9672,6 @@ var nodeLabels = [...]string{
"host",
"hosting",
"hot",
- "hoteles",
"hotels",
"hotmail",
"house",
@@ -10149,7 +9784,6 @@ var nodeLabels = [...]string{
"lamborghini",
"lamer",
"lancaster",
- "lancia",
"land",
"landrover",
"lanxess",
@@ -10180,7 +9814,6 @@ var nodeLabels = [...]string{
"limited",
"limo",
"lincoln",
- "linde",
"link",
"lipsy",
"live",
@@ -10192,7 +9825,6 @@ var nodeLabels = [...]string{
"loans",
"locker",
"locus",
- "loft",
"lol",
"london",
"lotte",
@@ -10212,7 +9844,6 @@ var nodeLabels = [...]string{
"lv",
"ly",
"ma",
- "macys",
"madrid",
"maif",
"maison",
@@ -10226,7 +9857,6 @@ var nodeLabels = [...]string{
"markets",
"marriott",
"marshalls",
- "maserati",
"mattel",
"mba",
"mc",
@@ -10286,7 +9916,6 @@ var nodeLabels = [...]string{
"mu",
"museum",
"music",
- "mutual",
"mv",
"mw",
"mx",
@@ -10374,7 +10003,6 @@ var nodeLabels = [...]string{
"partners",
"parts",
"party",
- "passagens",
"pay",
"pccw",
"pe",
@@ -10530,7 +10158,6 @@ var nodeLabels = [...]string{
"select",
"sener",
"services",
- "ses",
"seven",
"sew",
"sex",
@@ -10647,7 +10274,6 @@ var nodeLabels = [...]string{
"tiaa",
"tickets",
"tienda",
- "tiffany",
"tips",
"tires",
"tirol",
@@ -10677,7 +10303,6 @@ var nodeLabels = [...]string{
"trading",
"training",
"travel",
- "travelchannel",
"travelers",
"travelersinsurance",
"trust",
@@ -10739,7 +10364,6 @@ var nodeLabels = [...]string{
"voto",
"voyage",
"vu",
- "vuelos",
"wales",
"walmart",
"walter",
@@ -10856,7 +10480,6 @@ var nodeLabels = [...]string{
"xn--j1amh",
"xn--j6w193g",
"xn--jlq480n2rg",
- "xn--jlq61u9w7b",
"xn--jvr189m",
"xn--kcrx77d1x4a",
"xn--kprw13d",
@@ -11119,18 +10742,24 @@ var nodeLabels = [...]string{
"loginline",
"messerli",
"netlify",
+ "ngrok",
+ "ngrok-free",
"noop",
"northflank",
"ondigitalocean",
"onflashdrive",
"platform0",
"run",
+ "snowflake",
+ "storipress",
+ "streamlit",
"telebit",
"typedream",
"vercel",
"web",
"wnext",
"a",
+ "privatelink",
"bet",
"com",
"coop",
@@ -11316,6 +10945,7 @@ var nodeLabels = [...]string{
"edu",
"or",
"org",
+ "activetrail",
"cloudns",
"dscloud",
"dyndns",
@@ -11330,10 +10960,27 @@ var nodeLabels = [...]string{
"orx",
"selfip",
"webhop",
- "asso",
- "barreau",
+ "africa",
+ "agro",
+ "architectes",
+ "assur",
+ "avocats",
"blogspot",
- "gouv",
+ "co",
+ "com",
+ "eco",
+ "econo",
+ "edu",
+ "info",
+ "loisirs",
+ "money",
+ "net",
+ "org",
+ "ote",
+ "restaurant",
+ "resto",
+ "tourism",
+ "univ",
"com",
"edu",
"gov",
@@ -11529,9 +11176,6 @@ var nodeLabels = [...]string{
"zlg",
"blogspot",
"simplesite",
- "virtualcloud",
- "scale",
- "users",
"ac",
"al",
"am",
@@ -11772,6 +11416,7 @@ var nodeLabels = [...]string{
"ac",
"ah",
"bj",
+ "canva-apps",
"com",
"cq",
"edu",
@@ -11853,6 +11498,7 @@ var nodeLabels = [...]string{
"owo",
"001www",
"0emm",
+ "180r",
"1kapp",
"3utilities",
"4u",
@@ -11888,11 +11534,13 @@ var nodeLabels = [...]string{
"br",
"builtwithdark",
"cafjs",
+ "canva-apps",
"cechire",
+ "cf-ipfs",
"ciscofreak",
- "clicketcloud",
"cloudcontrolapp",
"cloudcontrolled",
+ "cloudflare-ipfs",
"cn",
"co",
"code",
@@ -11919,6 +11567,7 @@ var nodeLabels = [...]string{
"dnsdojo",
"dnsiskinky",
"doesntexist",
+ "dojin",
"dontexist",
"doomdns",
"dopaas",
@@ -11951,6 +11600,7 @@ var nodeLabels = [...]string{
"eu",
"evennode",
"familyds",
+ "fastly-edge",
"fastly-terrarium",
"fastvps-server",
"fbsbx",
@@ -12024,7 +11674,6 @@ var nodeLabels = [...]string{
"health-carereform",
"herokuapp",
"herokussl",
- "hidora",
"hk",
"hobby-site",
"homelinux",
@@ -12098,6 +11747,7 @@ var nodeLabels = [...]string{
"isa-geek",
"isa-hockeynut",
"issmarterthanyou",
+ "it",
"jdevcloud",
"jelastic",
"joyent",
@@ -12107,6 +11757,7 @@ var nodeLabels = [...]string{
"kozow",
"kr",
"ktistory",
+ "ladesk",
"likes-pie",
"likescandy",
"linode",
@@ -12133,6 +11784,7 @@ var nodeLabels = [...]string{
"myshopblocks",
"myshopify",
"myspreadshop",
+ "mytabit",
"mythic-beasts",
"mytuleap",
"myvnc",
@@ -12179,6 +11831,8 @@ var nodeLabels = [...]string{
"rhcloud",
"ru",
"sa",
+ "sakuratan",
+ "sakuraweb",
"saves-the-whales",
"scrysec",
"securitytactics",
@@ -12241,6 +11895,7 @@ var nodeLabels = [...]string{
"wphostedmail",
"wpmucdn",
"writesthisblog",
+ "x0",
"xnbay",
"yolasite",
"za",
@@ -12295,107 +11950,154 @@ var nodeLabels = [...]string{
"us-east-2",
"us-west-1",
"us-west-2",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
+ "analytics-gateway",
+ "aws-cloud9",
"cloud9",
"dualstack",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
"dualstack",
"s3",
"s3-website",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
+ "aws-cloud9",
"cloud9",
"dualstack",
"s3",
"s3-website",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
"dualstack",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
"dualstack",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
"dualstack",
"s3",
"s3-website",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
"dualstack",
"s3",
"s3-website",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
+ "analytics-gateway",
+ "aws-cloud9",
"cloud9",
"dualstack",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
"dualstack",
"s3",
"s3-website",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
"dualstack",
"s3",
"s3-website",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
+ "aws-cloud9",
"cloud9",
"dualstack",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "analytics-gateway",
+ "aws-cloud9",
"cloud9",
"dualstack",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "analytics-gateway",
+ "aws-cloud9",
"cloud9",
"dualstack",
"s3",
"s3-website",
+ "webview-assets",
"vfs",
"webview-assets",
"s3",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
+ "analytics-gateway",
+ "aws-cloud9",
"cloud9",
+ "webview-assets",
"vfs",
"webview-assets",
"r",
@@ -12610,6 +12312,7 @@ var nodeLabels = [...]string{
"pages",
"customer",
"bss",
+ "autocode",
"curv",
"deno",
"deno-staging",
@@ -12623,8 +12326,11 @@ var nodeLabels = [...]string{
"localcert",
"loginline",
"mediatech",
+ "ngrok",
+ "ngrok-free",
"pages",
"platter-app",
+ "r2",
"shiftcrypto",
"stg",
"stgstage",
@@ -13016,6 +12722,7 @@ var nodeLabels = [...]string{
"net",
"org",
"blogspot",
+ "mytabit",
"ravpage",
"tabitorder",
"ac",
@@ -13176,6 +12883,13 @@ var nodeLabels = [...]string{
"dyndns",
"id",
"apps",
+ "ap",
+ "au",
+ "eu",
+ "in",
+ "jp",
+ "sa",
+ "us",
"stage",
"mock",
"sys",
@@ -13649,6 +13363,7 @@ var nodeLabels = [...]string{
"net",
"org",
"sch",
+ "2-d",
"ac",
"ad",
"aichi",
@@ -13662,6 +13377,7 @@ var nodeLabels = [...]string{
"bitter",
"blogspot",
"blush",
+ "bona",
"boo",
"boy",
"boyfriend",
@@ -13682,18 +13398,22 @@ var nodeLabels = [...]string{
"cocotte",
"coolblog",
"cranky",
+ "crap",
"cutegirl",
"daa",
+ "daynight",
"deca",
"deci",
"digick",
"ed",
+ "eek",
"egoism",
"ehime",
"fakefur",
"fashionstore",
"fem",
"flier",
+ "flop",
"floppy",
"fool",
"frenchkiss",
@@ -13710,6 +13430,7 @@ var nodeLabels = [...]string{
"greater",
"gunma",
"hacca",
+ "halfmoon",
"handcrafted",
"heavy",
"her",
@@ -13725,6 +13446,7 @@ var nodeLabels = [...]string{
"ishikawa",
"itigo",
"iwate",
+ "jeez",
"jellybean",
"kagawa",
"kagoshima",
@@ -13748,7 +13470,9 @@ var nodeLabels = [...]string{
"lovepop",
"lovesick",
"main",
+ "matrix",
"mie",
+ "mimoza",
"miyagi",
"miyazaki",
"mods",
@@ -13761,10 +13485,13 @@ var nodeLabels = [...]string{
"namaste",
"nara",
"ne",
+ "netgamers",
"niigata",
"nikita",
"nobushi",
"noor",
+ "nyanta",
+ "o0o0",
"oita",
"okayama",
"okinawa",
@@ -13785,22 +13512,30 @@ var nodeLabels = [...]string{
"pussycat",
"pya",
"raindrop",
+ "rdy",
"readymade",
+ "rgr",
+ "rulez",
"sadist",
"saga",
"saitama",
+ "sakurastorage",
+ "saloon",
"sapporo",
+ "sblo",
"schoolbus",
"secret",
"sendai",
"shiga",
"shimane",
"shizuoka",
+ "skr",
"staba",
"stripper",
"sub",
"sunnyday",
"supersale",
+ "tank",
"theshop",
"thick",
"tochigi",
@@ -13809,7 +13544,9 @@ var nodeLabels = [...]string{
"tonkotsu",
"tottori",
"toyama",
+ "uh-oh",
"under",
+ "undo",
"upper",
"usercontent",
"velvet",
@@ -13818,8 +13555,11 @@ var nodeLabels = [...]string{
"vivian",
"wakayama",
"watson",
+ "webaccel",
"weblike",
+ "websozai",
"whitesnow",
+ "xii",
"xn--0trq7p7nn",
"xn--1ctwo",
"xn--1lqs03n",
@@ -14954,6 +14694,14 @@ var nodeLabels = [...]string{
"yoshino",
"aseinet",
"gehirn",
+ "ivory",
+ "mail-box",
+ "mints",
+ "mokuren",
+ "opal",
+ "sakura",
+ "sumomo",
+ "topaz",
"user",
"aga",
"agano",
@@ -15221,6 +14969,10 @@ var nodeLabels = [...]string{
"yoshida",
"yoshikawa",
"yoshimi",
+ "isk01",
+ "isk02",
+ "s3",
+ "s3",
"city",
"city",
"aisho",
@@ -15476,6 +15228,8 @@ var nodeLabels = [...]string{
"wakayama",
"yuasa",
"yura",
+ "rs",
+ "user",
"asahi",
"funagata",
"higashine",
@@ -15865,552 +15619,6 @@ var nodeLabels = [...]string{
"net",
"or",
"org",
- "academy",
- "agriculture",
- "air",
- "airguard",
- "alabama",
- "alaska",
- "amber",
- "ambulance",
- "american",
- "americana",
- "americanantiques",
- "americanart",
- "amsterdam",
- "and",
- "annefrank",
- "anthro",
- "anthropology",
- "antiques",
- "aquarium",
- "arboretum",
- "archaeological",
- "archaeology",
- "architecture",
- "art",
- "artanddesign",
- "artcenter",
- "artdeco",
- "arteducation",
- "artgallery",
- "arts",
- "artsandcrafts",
- "asmatart",
- "assassination",
- "assisi",
- "association",
- "astronomy",
- "atlanta",
- "austin",
- "australia",
- "automotive",
- "aviation",
- "axis",
- "badajoz",
- "baghdad",
- "bahn",
- "bale",
- "baltimore",
- "barcelona",
- "baseball",
- "basel",
- "baths",
- "bauern",
- "beauxarts",
- "beeldengeluid",
- "bellevue",
- "bergbau",
- "berkeley",
- "berlin",
- "bern",
- "bible",
- "bilbao",
- "bill",
- "birdart",
- "birthplace",
- "bonn",
- "boston",
- "botanical",
- "botanicalgarden",
- "botanicgarden",
- "botany",
- "brandywinevalley",
- "brasil",
- "bristol",
- "british",
- "britishcolumbia",
- "broadcast",
- "brunel",
- "brussel",
- "brussels",
- "bruxelles",
- "building",
- "burghof",
- "bus",
- "bushey",
- "cadaques",
- "california",
- "cambridge",
- "can",
- "canada",
- "capebreton",
- "carrier",
- "cartoonart",
- "casadelamoneda",
- "castle",
- "castres",
- "celtic",
- "center",
- "chattanooga",
- "cheltenham",
- "chesapeakebay",
- "chicago",
- "children",
- "childrens",
- "childrensgarden",
- "chiropractic",
- "chocolate",
- "christiansburg",
- "cincinnati",
- "cinema",
- "circus",
- "civilisation",
- "civilization",
- "civilwar",
- "clinton",
- "clock",
- "coal",
- "coastaldefence",
- "cody",
- "coldwar",
- "collection",
- "colonialwilliamsburg",
- "coloradoplateau",
- "columbia",
- "columbus",
- "communication",
- "communications",
- "community",
- "computer",
- "computerhistory",
- "contemporary",
- "contemporaryart",
- "convent",
- "copenhagen",
- "corporation",
- "corvette",
- "costume",
- "countryestate",
- "county",
- "crafts",
- "cranbrook",
- "creation",
- "cultural",
- "culturalcenter",
- "culture",
- "cyber",
- "cymru",
- "dali",
- "dallas",
- "database",
- "ddr",
- "decorativearts",
- "delaware",
- "delmenhorst",
- "denmark",
- "depot",
- "design",
- "detroit",
- "dinosaur",
- "discovery",
- "dolls",
- "donostia",
- "durham",
- "eastafrica",
- "eastcoast",
- "education",
- "educational",
- "egyptian",
- "eisenbahn",
- "elburg",
- "elvendrell",
- "embroidery",
- "encyclopedic",
- "england",
- "entomology",
- "environment",
- "environmentalconservation",
- "epilepsy",
- "essex",
- "estate",
- "ethnology",
- "exeter",
- "exhibition",
- "family",
- "farm",
- "farmequipment",
- "farmers",
- "farmstead",
- "field",
- "figueres",
- "filatelia",
- "film",
- "fineart",
- "finearts",
- "finland",
- "flanders",
- "florida",
- "force",
- "fortmissoula",
- "fortworth",
- "foundation",
- "francaise",
- "frankfurt",
- "franziskaner",
- "freemasonry",
- "freiburg",
- "fribourg",
- "frog",
- "fundacio",
- "furniture",
- "gallery",
- "garden",
- "gateway",
- "geelvinck",
- "gemological",
- "geology",
- "georgia",
- "giessen",
- "glas",
- "glass",
- "gorge",
- "grandrapids",
- "graz",
- "guernsey",
- "halloffame",
- "hamburg",
- "handson",
- "harvestcelebration",
- "hawaii",
- "health",
- "heimatunduhren",
- "hellas",
- "helsinki",
- "hembygdsforbund",
- "heritage",
- "histoire",
- "historical",
- "historicalsociety",
- "historichouses",
- "historisch",
- "historisches",
- "history",
- "historyofscience",
- "horology",
- "house",
- "humanities",
- "illustration",
- "imageandsound",
- "indian",
- "indiana",
- "indianapolis",
- "indianmarket",
- "intelligence",
- "interactive",
- "iraq",
- "iron",
- "isleofman",
- "jamison",
- "jefferson",
- "jerusalem",
- "jewelry",
- "jewish",
- "jewishart",
- "jfk",
- "journalism",
- "judaica",
- "judygarland",
- "juedisches",
- "juif",
- "karate",
- "karikatur",
- "kids",
- "koebenhavn",
- "koeln",
- "kunst",
- "kunstsammlung",
- "kunstunddesign",
- "labor",
- "labour",
- "lajolla",
- "lancashire",
- "landes",
- "lans",
- "larsson",
- "lewismiller",
- "lincoln",
- "linz",
- "living",
- "livinghistory",
- "localhistory",
- "london",
- "losangeles",
- "louvre",
- "loyalist",
- "lucerne",
- "luxembourg",
- "luzern",
- "mad",
- "madrid",
- "mallorca",
- "manchester",
- "mansion",
- "mansions",
- "manx",
- "marburg",
- "maritime",
- "maritimo",
- "maryland",
- "marylhurst",
- "media",
- "medical",
- "medizinhistorisches",
- "meeres",
- "memorial",
- "mesaverde",
- "michigan",
- "midatlantic",
- "military",
- "mill",
- "miners",
- "mining",
- "minnesota",
- "missile",
- "missoula",
- "modern",
- "moma",
- "money",
- "monmouth",
- "monticello",
- "montreal",
- "moscow",
- "motorcycle",
- "muenchen",
- "muenster",
- "mulhouse",
- "muncie",
- "museet",
- "museumcenter",
- "museumvereniging",
- "music",
- "national",
- "nationalfirearms",
- "nationalheritage",
- "nativeamerican",
- "naturalhistory",
- "naturalhistorymuseum",
- "naturalsciences",
- "nature",
- "naturhistorisches",
- "natuurwetenschappen",
- "naumburg",
- "naval",
- "nebraska",
- "neues",
- "newhampshire",
- "newjersey",
- "newmexico",
- "newport",
- "newspaper",
- "newyork",
- "niepce",
- "norfolk",
- "north",
- "nrw",
- "nyc",
- "nyny",
- "oceanographic",
- "oceanographique",
- "omaha",
- "online",
- "ontario",
- "openair",
- "oregon",
- "oregontrail",
- "otago",
- "oxford",
- "pacific",
- "paderborn",
- "palace",
- "paleo",
- "palmsprings",
- "panama",
- "paris",
- "pasadena",
- "pharmacy",
- "philadelphia",
- "philadelphiaarea",
- "philately",
- "phoenix",
- "photography",
- "pilots",
- "pittsburgh",
- "planetarium",
- "plantation",
- "plants",
- "plaza",
- "portal",
- "portland",
- "portlligat",
- "posts-and-telecommunications",
- "preservation",
- "presidio",
- "press",
- "project",
- "public",
- "pubol",
- "quebec",
- "railroad",
- "railway",
- "research",
- "resistance",
- "riodejaneiro",
- "rochester",
- "rockart",
- "roma",
- "russia",
- "saintlouis",
- "salem",
- "salvadordali",
- "salzburg",
- "sandiego",
- "sanfrancisco",
- "santabarbara",
- "santacruz",
- "santafe",
- "saskatchewan",
- "satx",
- "savannahga",
- "schlesisches",
- "schoenbrunn",
- "schokoladen",
- "school",
- "schweiz",
- "science",
- "science-fiction",
- "scienceandhistory",
- "scienceandindustry",
- "sciencecenter",
- "sciencecenters",
- "sciencehistory",
- "sciences",
- "sciencesnaturelles",
- "scotland",
- "seaport",
- "settlement",
- "settlers",
- "shell",
- "sherbrooke",
- "sibenik",
- "silk",
- "ski",
- "skole",
- "society",
- "sologne",
- "soundandvision",
- "southcarolina",
- "southwest",
- "space",
- "spy",
- "square",
- "stadt",
- "stalbans",
- "starnberg",
- "state",
- "stateofdelaware",
- "station",
- "steam",
- "steiermark",
- "stjohn",
- "stockholm",
- "stpetersburg",
- "stuttgart",
- "suisse",
- "surgeonshall",
- "surrey",
- "svizzera",
- "sweden",
- "sydney",
- "tank",
- "tcm",
- "technology",
- "telekommunikation",
- "television",
- "texas",
- "textile",
- "theater",
- "time",
- "timekeeping",
- "topology",
- "torino",
- "touch",
- "town",
- "transport",
- "tree",
- "trolley",
- "trust",
- "trustee",
- "uhren",
- "ulm",
- "undersea",
- "university",
- "usa",
- "usantiques",
- "usarts",
- "uscountryestate",
- "usculture",
- "usdecorativearts",
- "usgarden",
- "ushistory",
- "ushuaia",
- "uslivinghistory",
- "utah",
- "uvic",
- "valley",
- "vantaa",
- "versailles",
- "viking",
- "village",
- "virginia",
- "virtual",
- "virtuel",
- "vlaanderen",
- "volkenkunde",
- "wales",
- "wallonie",
- "war",
- "washingtondc",
- "watch-and-clock",
- "watchandclock",
- "western",
- "westfalen",
- "whaling",
- "wildlife",
- "williamsburg",
- "windmill",
- "workshop",
- "xn--9dbhblg6di",
- "xn--comunicaes-v6a2o",
- "xn--correios-e-telecomunicaes-ghc29a",
- "xn--h1aegh",
- "xn--lns-qla",
- "york",
- "yorkshire",
- "yosemite",
- "youth",
- "zoological",
- "zoology",
"aero",
"biz",
"com",
@@ -16483,6 +15691,19 @@ var nodeLabels = [...]string{
"asso",
"nom",
"adobeaemcloud",
+ "adobeio-static",
+ "adobeioruntime",
+ "akadns",
+ "akamai",
+ "akamai-staging",
+ "akamaiedge",
+ "akamaiedge-staging",
+ "akamaihd",
+ "akamaihd-staging",
+ "akamaiorigin",
+ "akamaiorigin-staging",
+ "akamaized",
+ "akamaized-staging",
"alwaysdata",
"appudo",
"at-band-camp",
@@ -16532,6 +15753,10 @@ var nodeLabels = [...]string{
"dynv6",
"eating-organic",
"edgeapp",
+ "edgekey",
+ "edgekey-staging",
+ "edgesuite",
+ "edgesuite-staging",
"elastx",
"endofinternet",
"familyds",
@@ -16612,6 +15837,7 @@ var nodeLabels = [...]string{
"shopselect",
"siteleaf",
"square7",
+ "squares",
"srcf",
"static-access",
"supabase",
@@ -16634,6 +15860,7 @@ var nodeLabels = [...]string{
"cdn",
"1",
"2",
+ "3",
"centralus",
"eastasia",
"eastus2",
@@ -17619,6 +16846,7 @@ var nodeLabels = [...]string{
"is-very-nice",
"is-very-sweet",
"isa-geek",
+ "jpn",
"js",
"kicks-ass",
"mayfirst",
@@ -17774,6 +17002,7 @@ var nodeLabels = [...]string{
"org",
"framer",
"1337",
+ "ngrok",
"biz",
"com",
"edu",
@@ -17978,12 +17207,17 @@ var nodeLabels = [...]string{
"kwpsp",
"mup",
"mw",
+ "oia",
"oirm",
+ "oke",
+ "oow",
+ "oschr",
"oum",
"pa",
"pinb",
"piw",
"po",
+ "pr",
"psp",
"psse",
"pup",
@@ -18009,11 +17243,14 @@ var nodeLabels = [...]string{
"wios",
"witd",
"wiw",
+ "wkz",
"wsa",
"wskr",
+ "wsse",
"wuoz",
"wzmiuw",
"zp",
+ "zpisdn",
"co",
"name",
"own",
@@ -18355,6 +17592,7 @@ var nodeLabels = [...]string{
"consulado",
"edu",
"embaixada",
+ "kirara",
"mil",
"net",
"noho",
@@ -18501,6 +17739,7 @@ var nodeLabels = [...]string{
"quickconnect",
"rdv",
"vpnplus",
+ "x0",
"direct",
"prequalifyme",
"now-dns",
@@ -18549,7 +17788,9 @@ var nodeLabels = [...]string{
"travel",
"better-than",
"dyndns",
+ "from",
"on-the-web",
+ "sakura",
"worse-than",
"blogspot",
"club",
@@ -18602,6 +17843,7 @@ var nodeLabels = [...]string{
"dp",
"edu",
"gov",
+ "ie",
"if",
"in",
"inf",
@@ -18616,6 +17858,7 @@ var nodeLabels = [...]string{
"kirovograd",
"km",
"kr",
+ "kropyvnytskyi",
"krym",
"ks",
"kv",
@@ -19010,18 +18253,84 @@ var nodeLabels = [...]string{
"net",
"org",
"ac",
+ "ai",
+ "angiang",
+ "bacgiang",
+ "backan",
+ "baclieu",
+ "bacninh",
+ "baria-vungtau",
+ "bentre",
+ "binhdinh",
+ "binhduong",
+ "binhphuoc",
+ "binhthuan",
"biz",
"blogspot",
+ "camau",
+ "cantho",
+ "caobang",
"com",
+ "daklak",
+ "daknong",
+ "danang",
+ "dienbien",
+ "dongnai",
+ "dongthap",
"edu",
+ "gialai",
"gov",
+ "hagiang",
+ "haiduong",
+ "haiphong",
+ "hanam",
+ "hanoi",
+ "hatinh",
+ "haugiang",
"health",
+ "hoabinh",
+ "hungyen",
+ "id",
"info",
"int",
+ "io",
+ "khanhhoa",
+ "kiengiang",
+ "kontum",
+ "laichau",
+ "lamdong",
+ "langson",
+ "laocai",
+ "longan",
+ "namdinh",
"name",
"net",
+ "nghean",
+ "ninhbinh",
+ "ninhthuan",
"org",
+ "phutho",
+ "phuyen",
"pro",
+ "quangbinh",
+ "quangnam",
+ "quangngai",
+ "quangninh",
+ "quangtri",
+ "soctrang",
+ "sonla",
+ "tayninh",
+ "thaibinh",
+ "thainguyen",
+ "thanhhoa",
+ "thanhphohochiminh",
+ "thuathienhue",
+ "tiengiang",
+ "travinh",
+ "tuyenquang",
+ "vinhlong",
+ "vinhphuc",
+ "yenbai",
"blog",
"cn",
"com",
diff --git a/internal/quic/ack_delay.go b/quic/ack_delay.go
similarity index 100%
rename from internal/quic/ack_delay.go
rename to quic/ack_delay.go
diff --git a/internal/quic/ack_delay_test.go b/quic/ack_delay_test.go
similarity index 100%
rename from internal/quic/ack_delay_test.go
rename to quic/ack_delay_test.go
diff --git a/quic/acks.go b/quic/acks.go
new file mode 100644
index 000000000..039b7b46e
--- /dev/null
+++ b/quic/acks.go
@@ -0,0 +1,191 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "time"
+)
+
+// ackState tracks packets received from a peer within a number space.
+// It handles packet deduplication (don't process the same packet twice) and
+// determines the timing and content of ACK frames.
+type ackState struct {
+ seen rangeset[packetNumber]
+
+ // The time at which we must send an ACK frame, even if we have no other data to send.
+ nextAck time.Time
+
+ // The time we received the largest-numbered packet in seen.
+ maxRecvTime time.Time
+
+ // The largest-numbered ack-eliciting packet in seen.
+ maxAckEliciting packetNumber
+
+ // The number of ack-eliciting packets in seen that we have not yet acknowledged.
+ unackedAckEliciting int
+}
+
+// shouldProcess reports whether a packet should be handled or discarded.
+func (acks *ackState) shouldProcess(num packetNumber) bool {
+ if packetNumber(acks.seen.min()) > num {
+ // We've discarded the state for this range of packet numbers.
+ // Discard the packet rather than potentially processing a duplicate.
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.3-5
+ return false
+ }
+ if acks.seen.contains(num) {
+ // Discard duplicate packets.
+ return false
+ }
+ return true
+}
+
+// receive records receipt of a packet.
+func (acks *ackState) receive(now time.Time, space numberSpace, num packetNumber, ackEliciting bool) {
+ if ackEliciting {
+ acks.unackedAckEliciting++
+ if acks.mustAckImmediately(space, num) {
+ acks.nextAck = now
+ } else if acks.nextAck.IsZero() {
+ // This packet does not need to be acknowledged immediately,
+ // but the ack must not be intentionally delayed by more than
+ // the max_ack_delay transport parameter we sent to the peer.
+ //
+ // We always delay acks by the maximum allowed, less the timer
+ // granularity. ("[max_ack_delay] SHOULD include the receiver's
+ // expected delays in alarms firing.")
+ //
+ // https://www.rfc-editor.org/rfc/rfc9000#section-18.2-4.28.1
+ acks.nextAck = now.Add(maxAckDelay - timerGranularity)
+ }
+ if num > acks.maxAckEliciting {
+ acks.maxAckEliciting = num
+ }
+ }
+
+ acks.seen.add(num, num+1)
+ if num == acks.seen.max() {
+ acks.maxRecvTime = now
+ }
+
+ // Limit the total number of ACK ranges by dropping older ranges.
+ //
+ // Remembering more ranges results in larger ACK frames.
+ //
+ // Remembering a large number of ranges could result in ACK frames becoming
+ // too large to fit in a packet, in which case we will silently drop older
+ // ranges during packet construction.
+ //
+ // Remembering fewer ranges can result in unnecessary retransmissions,
+ // since we cannot accept packets older than the oldest remembered range.
+ //
+ // The limit here is completely arbitrary. If it seems wrong, it probably is.
+ //
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.2.3
+ const maxAckRanges = 8
+ if overflow := acks.seen.numRanges() - maxAckRanges; overflow > 0 {
+ acks.seen.removeranges(0, overflow)
+ }
+}
+
+// mustAckImmediately reports whether an ack-eliciting packet must be acknowledged immediately,
+// or whether the ack may be deferred.
+func (acks *ackState) mustAckImmediately(space numberSpace, num packetNumber) bool {
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1
+ if space != appDataSpace {
+ // "[...] all ack-eliciting Initial and Handshake packets [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1-2
+ return true
+ }
+ if num < acks.maxAckEliciting {
+ // "[...] when the received packet has a packet number less than another
+ // ack-eliciting packet that has been received [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1-8.1
+ return true
+ }
+ if acks.seen.rangeContaining(acks.maxAckEliciting).end != num {
+ // "[...] when the packet has a packet number larger than the highest-numbered
+ // ack-eliciting packet that has been received and there are missing packets
+ // between that packet and this packet."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.1-8.2
+ //
+ // This case is a bit tricky. Let's say we've received:
+ // 0, ack-eliciting
+ // 1, ack-eliciting
+ // 3, NOT ack eliciting
+ //
+ // We have sent ACKs for 0 and 1. If we receive ack-eliciting packet 2,
+ // we do not need to send an immediate ACK, because there are no missing
+ // packets between it and the highest-numbered ack-eliciting packet (1).
+ // If we receive ack-eliciting packet 4, we do need to send an immediate ACK,
+ // because there's a gap (the missing packet 2).
+ //
+ // We check for this by looking up the ACK range which contains the
+ // highest-numbered ack-eliciting packet: [0, 1) in the above example.
+ // If the range ends just before the packet we are now processing,
+ // there are no gaps. If it does not, there must be a gap.
+ return true
+ }
+ // "[...] SHOULD send an ACK frame after receiving at least two ack-eliciting packets."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.2
+ //
+ // This ack frequency takes a substantial toll on performance, however.
+ // Follow the behavior of Google QUICHE:
+ // Ack every other packet for the first 100 packets, and then ack every 10th packet.
+ // This keeps ack frequency high during the beginning of slow start when CWND is
+ // increasing rapidly.
+ packetsBeforeAck := 2
+ if acks.seen.max() > 100 {
+ packetsBeforeAck = 10
+ }
+ return acks.unackedAckEliciting >= packetsBeforeAck
+}
+
+// shouldSendAck reports whether the connection should send an ACK frame at this time,
+// in an ACK-only packet if necessary.
+func (acks *ackState) shouldSendAck(now time.Time) bool {
+ return !acks.nextAck.IsZero() && !acks.nextAck.After(now)
+}
+
+// acksToSend returns the set of packet numbers to ACK at this time, and the current ack delay.
+// It may return acks even if shouldSendAck returns false, when there are unacked
+// ack-eliciting packets whose ack is being delayed.
+func (acks *ackState) acksToSend(now time.Time) (nums rangeset[packetNumber], ackDelay time.Duration) {
+ if acks.nextAck.IsZero() && acks.unackedAckEliciting == 0 {
+ return nil, 0
+ }
+ // "[...] the delays intentionally introduced between the time the packet with the
+ // largest packet number is received and the time an acknowledgement is sent."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.2.5-1
+ delay := now.Sub(acks.maxRecvTime)
+ if delay < 0 {
+ delay = 0
+ }
+ return acks.seen, delay
+}
+
+// sentAck records that an ACK frame has been sent.
+func (acks *ackState) sentAck() {
+ acks.nextAck = time.Time{}
+ acks.unackedAckEliciting = 0
+}
+
+// handleAck records that an ack has been received for a ACK frame we sent
+// containing the given Largest Acknowledged field.
+func (acks *ackState) handleAck(largestAcked packetNumber) {
+ // We can stop acking packets less or equal to largestAcked.
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.2.4-1
+ //
+ // We rely on acks.seen containing the largest packet number that has been successfully
+ // processed, so we retain the range containing largestAcked and discard previous ones.
+ acks.seen.sub(0, acks.seen.rangeContaining(largestAcked).start)
+}
+
+// largestSeen reports the largest seen packet.
+func (acks *ackState) largestSeen() packetNumber {
+ return acks.seen.max()
+}
diff --git a/quic/acks_test.go b/quic/acks_test.go
new file mode 100644
index 000000000..d10f917ad
--- /dev/null
+++ b/quic/acks_test.go
@@ -0,0 +1,235 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "slices"
+ "testing"
+ "time"
+)
+
+func TestAcksDisallowDuplicate(t *testing.T) {
+ // Don't process a packet that we've seen before.
+ acks := ackState{}
+ now := time.Now()
+ receive := []packetNumber{0, 1, 2, 4, 7, 6, 9}
+ seen := map[packetNumber]bool{}
+ for i, pnum := range receive {
+ acks.receive(now, appDataSpace, pnum, true)
+ seen[pnum] = true
+ for ppnum := packetNumber(0); ppnum < 11; ppnum++ {
+ if got, want := acks.shouldProcess(ppnum), !seen[ppnum]; got != want {
+ t.Fatalf("after receiving %v: acks.shouldProcess(%v) = %v, want %v", receive[:i+1], ppnum, got, want)
+ }
+ }
+ }
+}
+
+func TestAcksDisallowDiscardedAckRanges(t *testing.T) {
+ // Don't process a packet with a number in a discarded range.
+ acks := ackState{}
+ now := time.Now()
+ for pnum := packetNumber(0); ; pnum += 2 {
+ acks.receive(now, appDataSpace, pnum, true)
+ send, _ := acks.acksToSend(now)
+ for ppnum := packetNumber(0); ppnum < packetNumber(send.min()); ppnum++ {
+ if acks.shouldProcess(ppnum) {
+ t.Fatalf("after limiting ack ranges to %v: acks.shouldProcess(%v) (in discarded range) = true, want false", send, ppnum)
+ }
+ }
+ if send.min() > 10 {
+ break
+ }
+ }
+}
+
+func TestAcksSent(t *testing.T) {
+ type packet struct {
+ pnum packetNumber
+ ackEliciting bool
+ }
+ for _, test := range []struct {
+ name string
+ space numberSpace
+
+ // ackedPackets and packets are packets that we receive.
+ // After receiving all packets in ackedPackets, we send an ack.
+ // Then we receive the subsequent packets in packets.
+ ackedPackets []packet
+ packets []packet
+
+ wantDelay time.Duration
+ wantAcks rangeset[packetNumber]
+ }{{
+ name: "no packets to ack",
+ space: initialSpace,
+ }, {
+ name: "non-ack-eliciting packets are not acked",
+ space: initialSpace,
+ packets: []packet{{
+ pnum: 0,
+ ackEliciting: false,
+ }},
+ }, {
+ name: "ack-eliciting Initial packets are acked immediately",
+ space: initialSpace,
+ packets: []packet{{
+ pnum: 0,
+ ackEliciting: true,
+ }},
+ wantAcks: rangeset[packetNumber]{{0, 1}},
+ wantDelay: 0,
+ }, {
+ name: "ack-eliciting Handshake packets are acked immediately",
+ space: handshakeSpace,
+ packets: []packet{{
+ pnum: 0,
+ ackEliciting: true,
+ }},
+ wantAcks: rangeset[packetNumber]{{0, 1}},
+ wantDelay: 0,
+ }, {
+ name: "ack-eliciting AppData packets are acked after max_ack_delay",
+ space: appDataSpace,
+ packets: []packet{{
+ pnum: 0,
+ ackEliciting: true,
+ }},
+ wantAcks: rangeset[packetNumber]{{0, 1}},
+ wantDelay: maxAckDelay - timerGranularity,
+ }, {
+ name: "reordered ack-eliciting packets are acked immediately",
+ space: appDataSpace,
+ ackedPackets: []packet{{
+ pnum: 1,
+ ackEliciting: true,
+ }},
+ packets: []packet{{
+ pnum: 0,
+ ackEliciting: true,
+ }},
+ wantAcks: rangeset[packetNumber]{{0, 2}},
+ wantDelay: 0,
+ }, {
+ name: "gaps in ack-eliciting packets are acked immediately",
+ space: appDataSpace,
+ packets: []packet{{
+ pnum: 1,
+ ackEliciting: true,
+ }},
+ wantAcks: rangeset[packetNumber]{{1, 2}},
+ wantDelay: 0,
+ }, {
+ name: "reordered non-ack-eliciting packets are not acked immediately",
+ space: appDataSpace,
+ ackedPackets: []packet{{
+ pnum: 1,
+ ackEliciting: true,
+ }},
+ packets: []packet{{
+ pnum: 2,
+ ackEliciting: true,
+ }, {
+ pnum: 0,
+ ackEliciting: false,
+ }, {
+ pnum: 4,
+ ackEliciting: false,
+ }},
+ wantAcks: rangeset[packetNumber]{{0, 3}, {4, 5}},
+ wantDelay: maxAckDelay - timerGranularity,
+ }, {
+ name: "immediate ack after two ack-eliciting packets are received",
+ space: appDataSpace,
+ packets: []packet{{
+ pnum: 0,
+ ackEliciting: true,
+ }, {
+ pnum: 1,
+ ackEliciting: true,
+ }},
+ wantAcks: rangeset[packetNumber]{{0, 2}},
+ wantDelay: 0,
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ acks := ackState{}
+ start := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
+ for _, p := range test.ackedPackets {
+ t.Logf("receive %v.%v, ack-eliciting=%v", test.space, p.pnum, p.ackEliciting)
+ acks.receive(start, test.space, p.pnum, p.ackEliciting)
+ }
+ t.Logf("send an ACK frame")
+ acks.sentAck()
+ for _, p := range test.packets {
+ t.Logf("receive %v.%v, ack-eliciting=%v", test.space, p.pnum, p.ackEliciting)
+ acks.receive(start, test.space, p.pnum, p.ackEliciting)
+ }
+ switch {
+ case len(test.wantAcks) == 0:
+ // No ACK should be sent, even well after max_ack_delay.
+ if acks.shouldSendAck(start.Add(10 * maxAckDelay)) {
+ t.Errorf("acks.shouldSendAck(T+10*max_ack_delay) = true, want false")
+ }
+ case test.wantDelay > 0:
+ // No ACK should be sent before a delay.
+ if acks.shouldSendAck(start.Add(test.wantDelay - 1)) {
+ t.Errorf("acks.shouldSendAck(T+%v-1ns) = true, want false", test.wantDelay)
+ }
+ fallthrough
+ default:
+ // ACK should be sent after a delay.
+ if !acks.shouldSendAck(start.Add(test.wantDelay)) {
+ t.Errorf("acks.shouldSendAck(T+%v) = false, want true", test.wantDelay)
+ }
+ }
+ // acksToSend always reports the available packets that can be acked,
+ // and the amount of time that has passed since the most recent acked
+ // packet was received.
+ for _, delay := range []time.Duration{
+ 0,
+ test.wantDelay,
+ test.wantDelay + 1,
+ } {
+ gotNums, gotDelay := acks.acksToSend(start.Add(delay))
+ wantDelay := delay
+ if len(gotNums) == 0 {
+ wantDelay = 0
+ }
+ if !slices.Equal(gotNums, test.wantAcks) || gotDelay != wantDelay {
+ t.Errorf("acks.acksToSend(T+%v) = %v, %v; want %v, %v", delay, gotNums, gotDelay, test.wantAcks, wantDelay)
+ }
+ }
+ })
+ }
+}
+
+func TestAcksDiscardAfterAck(t *testing.T) {
+ acks := ackState{}
+ now := time.Now()
+ acks.receive(now, appDataSpace, 0, true)
+ acks.receive(now, appDataSpace, 2, true)
+ acks.receive(now, appDataSpace, 4, true)
+ acks.receive(now, appDataSpace, 5, true)
+ acks.receive(now, appDataSpace, 6, true)
+ acks.handleAck(6) // discards all ranges prior to the one containing packet 6
+ acks.receive(now, appDataSpace, 7, true)
+ got, _ := acks.acksToSend(now)
+ if len(got) != 1 {
+ t.Errorf("acks.acksToSend contains ranges prior to last acknowledged ack; got %v, want 1 range", got)
+ }
+}
+
+func TestAcksLargestSeen(t *testing.T) {
+ acks := ackState{}
+ now := time.Now()
+ acks.receive(now, appDataSpace, 0, true)
+ acks.receive(now, appDataSpace, 4, true)
+ acks.receive(now, appDataSpace, 1, true)
+ if got, want := acks.largestSeen(), packetNumber(4); got != want {
+ t.Errorf("acks.largestSeen() = %v, want %v", got, want)
+ }
+}
diff --git a/quic/atomic_bits.go b/quic/atomic_bits.go
new file mode 100644
index 000000000..e1e2594d1
--- /dev/null
+++ b/quic/atomic_bits.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import "sync/atomic"
+
+// atomicBits is an atomic uint32 that supports setting individual bits.
+type atomicBits[T ~uint32] struct {
+ bits atomic.Uint32
+}
+
+// set sets the bits in mask to the corresponding bits in v.
+// It returns the new value.
+func (a *atomicBits[T]) set(v, mask T) T {
+ if v&^mask != 0 {
+ panic("BUG: bits in v are not in mask")
+ }
+ for {
+ o := a.bits.Load()
+ n := (o &^ uint32(mask)) | uint32(v)
+ if a.bits.CompareAndSwap(o, n) {
+ return T(n)
+ }
+ }
+}
+
+func (a *atomicBits[T]) load() T {
+ return T(a.bits.Load())
+}
diff --git a/quic/bench_test.go b/quic/bench_test.go
new file mode 100644
index 000000000..636b71327
--- /dev/null
+++ b/quic/bench_test.go
@@ -0,0 +1,170 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "sync"
+ "testing"
+)
+
+// BenchmarkThroughput is based on the crypto/tls benchmark of the same name.
+func BenchmarkThroughput(b *testing.B) {
+ for size := 1; size <= 64; size <<= 1 {
+ name := fmt.Sprintf("%dMiB", size)
+ b.Run(name, func(b *testing.B) {
+ throughput(b, int64(size<<20))
+ })
+ }
+}
+
+func throughput(b *testing.B, totalBytes int64) {
+ // Same buffer size as crypto/tls's BenchmarkThroughput, for consistency.
+ const bufsize = 32 << 10
+
+ cli, srv := newLocalConnPair(b, &Config{}, &Config{})
+
+ go func() {
+ buf := make([]byte, bufsize)
+ for i := 0; i < b.N; i++ {
+ sconn, err := srv.AcceptStream(context.Background())
+ if err != nil {
+ panic(fmt.Errorf("AcceptStream: %v", err))
+ }
+ if _, err := io.CopyBuffer(sconn, sconn, buf); err != nil {
+ panic(fmt.Errorf("CopyBuffer: %v", err))
+ }
+ sconn.Close()
+ }
+ }()
+
+ b.SetBytes(totalBytes)
+ buf := make([]byte, bufsize)
+ chunks := int(math.Ceil(float64(totalBytes) / float64(len(buf))))
+ for i := 0; i < b.N; i++ {
+ cconn, err := cli.NewStream(context.Background())
+ if err != nil {
+ b.Fatalf("NewStream: %v", err)
+ }
+ closec := make(chan struct{})
+ go func() {
+ defer close(closec)
+ buf := make([]byte, bufsize)
+ if _, err := io.CopyBuffer(io.Discard, cconn, buf); err != nil {
+ panic(fmt.Errorf("Discard: %v", err))
+ }
+ }()
+ for j := 0; j < chunks; j++ {
+ _, err := cconn.Write(buf)
+ if err != nil {
+ b.Fatalf("Write: %v", err)
+ }
+ }
+ cconn.CloseWrite()
+ <-closec
+ cconn.Close()
+ }
+}
+
+func BenchmarkReadByte(b *testing.B) {
+ cli, srv := newLocalConnPair(b, &Config{}, &Config{})
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ buf := make([]byte, 1<<20)
+ sconn, err := srv.AcceptStream(context.Background())
+ if err != nil {
+ panic(fmt.Errorf("AcceptStream: %v", err))
+ }
+ for {
+ if _, err := sconn.Write(buf); err != nil {
+ break
+ }
+ sconn.Flush()
+ }
+ }()
+
+ b.SetBytes(1)
+ cconn, err := cli.NewStream(context.Background())
+ if err != nil {
+ b.Fatalf("NewStream: %v", err)
+ }
+ cconn.Flush()
+ for i := 0; i < b.N; i++ {
+ _, err := cconn.ReadByte()
+ if err != nil {
+ b.Fatalf("ReadByte: %v", err)
+ }
+ }
+ cconn.Close()
+}
+
+func BenchmarkWriteByte(b *testing.B) {
+ cli, srv := newLocalConnPair(b, &Config{}, &Config{})
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ sconn, err := srv.AcceptStream(context.Background())
+ if err != nil {
+ panic(fmt.Errorf("AcceptStream: %v", err))
+ }
+ n, err := io.Copy(io.Discard, sconn)
+ if n != int64(b.N) || err != nil {
+ b.Errorf("server io.Copy() = %v, %v; want %v, nil", n, err, b.N)
+ }
+ }()
+
+ b.SetBytes(1)
+ cconn, err := cli.NewStream(context.Background())
+ if err != nil {
+ b.Fatalf("NewStream: %v", err)
+ }
+ cconn.Flush()
+ for i := 0; i < b.N; i++ {
+ if err := cconn.WriteByte(0); err != nil {
+ b.Fatalf("WriteByte: %v", err)
+ }
+ }
+ cconn.Close()
+}
+
+func BenchmarkStreamCreation(b *testing.B) {
+ cli, srv := newLocalConnPair(b, &Config{}, &Config{})
+
+ go func() {
+ for i := 0; i < b.N; i++ {
+ sconn, err := srv.AcceptStream(context.Background())
+ if err != nil {
+ panic(fmt.Errorf("AcceptStream: %v", err))
+ }
+ sconn.Close()
+ }
+ }()
+
+ buf := make([]byte, 1)
+ for i := 0; i < b.N; i++ {
+ cconn, err := cli.NewStream(context.Background())
+ if err != nil {
+ b.Fatalf("NewStream: %v", err)
+ }
+ cconn.Write(buf)
+ cconn.Flush()
+ cconn.Read(buf)
+ cconn.Close()
+ }
+}
diff --git a/quic/config.go b/quic/config.go
new file mode 100644
index 000000000..5d420312b
--- /dev/null
+++ b/quic/config.go
@@ -0,0 +1,158 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "crypto/tls"
+ "log/slog"
+ "math"
+ "time"
+)
+
+// A Config structure configures a QUIC endpoint.
+// A Config must not be modified after it has been passed to a QUIC function.
+// A Config may be reused; the quic package will also not modify it.
+type Config struct {
+ // TLSConfig is the endpoint's TLS configuration.
+ // It must be non-nil and include at least one certificate or else set GetCertificate.
+ TLSConfig *tls.Config
+
+ // MaxBidiRemoteStreams limits the number of simultaneous bidirectional streams
+ // a peer may open.
+ // If zero, the default value of 100 is used.
+ // If negative, the limit is zero.
+ MaxBidiRemoteStreams int64
+
+ // MaxUniRemoteStreams limits the number of simultaneous unidirectional streams
+ // a peer may open.
+ // If zero, the default value of 100 is used.
+ // If negative, the limit is zero.
+ MaxUniRemoteStreams int64
+
+ // MaxStreamReadBufferSize is the maximum amount of data sent by the peer that a
+ // stream will buffer for reading.
+ // If zero, the default value of 1MiB is used.
+ // If negative, the limit is zero.
+ MaxStreamReadBufferSize int64
+
+ // MaxStreamWriteBufferSize is the maximum amount of data a stream will buffer for
+ // sending to the peer.
+ // If zero, the default value of 1MiB is used.
+ // If negative, the limit is zero.
+ MaxStreamWriteBufferSize int64
+
+ // MaxConnReadBufferSize is the maximum amount of data sent by the peer that a
+ // connection will buffer for reading, across all streams.
+ // If zero, the default value of 1MiB is used.
+ // If negative, the limit is zero.
+ MaxConnReadBufferSize int64
+
+ // RequireAddressValidation may be set to true to enable address validation
+ // of client connections prior to starting the handshake.
+ //
+ // Enabling this setting reduces the amount of work packets with spoofed
+ // source address information can cause a server to perform,
+ // at the cost of increased handshake latency.
+ RequireAddressValidation bool
+
+ // StatelessResetKey is used to provide stateless reset of connections.
+ // A restart may leave an endpoint without access to the state of
+ // existing connections. Stateless reset permits an endpoint to respond
+ // to a packet for a connection it does not recognize.
+ //
+ // This field should be filled with random bytes.
+ // The contents should remain stable across restarts,
+ // to permit an endpoint to send a reset for
+ // connections created before a restart.
+ //
+ // The contents of the StatelessResetKey should not be exposed.
+ // An attacker can use knowledge of this field's value to
+ // reset existing connections.
+ //
+ // If this field is left as zero, stateless reset is disabled.
+ StatelessResetKey [32]byte
+
+ // HandshakeTimeout is the maximum time in which a connection handshake must complete.
+ // If zero, the default of 10 seconds is used.
+ // If negative, there is no handshake timeout.
+ HandshakeTimeout time.Duration
+
+ // MaxIdleTimeout is the maximum time after which an idle connection will be closed.
+ // If zero, the default of 30 seconds is used.
+ // If negative, idle connections are never closed.
+ //
+ // The idle timeout for a connection is the minimum of the maximum idle timeouts
+ // of the endpoints.
+ MaxIdleTimeout time.Duration
+
+ // KeepAlivePeriod is the time after which a packet will be sent to keep
+ // an idle connection alive.
+ // If zero, keep alive packets are not sent.
+ // If greater than zero, the keep alive period is the smaller of KeepAlivePeriod and
+ // half the connection idle timeout.
+ KeepAlivePeriod time.Duration
+
+ // QLogLogger receives qlog events.
+ //
+ // Events currently correspond to the definitions in draft-ietf-qlog-quic-events-03.
+ // This is not the latest version of the draft, but is the latest version supported
+ // by common event log viewers as of the time this paragraph was written.
+ //
+ // The qlog package contains a slog.Handler which serializes qlog events
+ // to a standard JSON representation.
+ QLogLogger *slog.Logger
+}
+
+// Clone returns a shallow clone of c, or nil if c is nil.
+// It is safe to clone a [Config] that is being used concurrently by a QUIC endpoint.
+func (c *Config) Clone() *Config {
+ n := *c
+ return &n
+}
+
+func configDefault[T ~int64](v, def, limit T) T {
+ switch {
+ case v == 0:
+ return def
+ case v < 0:
+ return 0
+ default:
+ return min(v, limit)
+ }
+}
+
+func (c *Config) maxBidiRemoteStreams() int64 {
+ return configDefault(c.MaxBidiRemoteStreams, 100, maxStreamsLimit)
+}
+
+func (c *Config) maxUniRemoteStreams() int64 {
+ return configDefault(c.MaxUniRemoteStreams, 100, maxStreamsLimit)
+}
+
+func (c *Config) maxStreamReadBufferSize() int64 {
+ return configDefault(c.MaxStreamReadBufferSize, 1<<20, maxVarint)
+}
+
+func (c *Config) maxStreamWriteBufferSize() int64 {
+ return configDefault(c.MaxStreamWriteBufferSize, 1<<20, maxVarint)
+}
+
+func (c *Config) maxConnReadBufferSize() int64 {
+ return configDefault(c.MaxConnReadBufferSize, 1<<20, maxVarint)
+}
+
+func (c *Config) handshakeTimeout() time.Duration {
+ return configDefault(c.HandshakeTimeout, defaultHandshakeTimeout, math.MaxInt64)
+}
+
+func (c *Config) maxIdleTimeout() time.Duration {
+ return configDefault(c.MaxIdleTimeout, defaultMaxIdleTimeout, math.MaxInt64)
+}
+
+func (c *Config) keepAlivePeriod() time.Duration {
+ return configDefault(c.KeepAlivePeriod, defaultKeepAlivePeriod, math.MaxInt64)
+}
diff --git a/quic/config_test.go b/quic/config_test.go
new file mode 100644
index 000000000..d292854f5
--- /dev/null
+++ b/quic/config_test.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import "testing"
+
+func TestConfigTransportParameters(t *testing.T) {
+ const (
+ wantInitialMaxData = int64(1)
+ wantInitialMaxStreamData = int64(2)
+ wantInitialMaxStreamsBidi = int64(3)
+ wantInitialMaxStreamsUni = int64(4)
+ )
+ tc := newTestConn(t, clientSide, func(c *Config) {
+ c.MaxBidiRemoteStreams = wantInitialMaxStreamsBidi
+ c.MaxUniRemoteStreams = wantInitialMaxStreamsUni
+ c.MaxStreamReadBufferSize = wantInitialMaxStreamData
+ c.MaxConnReadBufferSize = wantInitialMaxData
+ })
+ tc.handshake()
+ if tc.sentTransportParameters == nil {
+ t.Fatalf("conn didn't send transport parameters during handshake")
+ }
+ p := tc.sentTransportParameters
+ if got, want := p.initialMaxData, wantInitialMaxData; got != want {
+ t.Errorf("initial_max_data = %v, want %v", got, want)
+ }
+ if got, want := p.initialMaxStreamDataBidiLocal, wantInitialMaxStreamData; got != want {
+ t.Errorf("initial_max_stream_data_bidi_local = %v, want %v", got, want)
+ }
+ if got, want := p.initialMaxStreamDataBidiRemote, wantInitialMaxStreamData; got != want {
+ t.Errorf("initial_max_stream_data_bidi_remote = %v, want %v", got, want)
+ }
+ if got, want := p.initialMaxStreamDataUni, wantInitialMaxStreamData; got != want {
+ t.Errorf("initial_max_stream_data_uni = %v, want %v", got, want)
+ }
+ if got, want := p.initialMaxStreamsBidi, wantInitialMaxStreamsBidi; got != want {
+		t.Errorf("initial_max_streams_bidi = %v, want %v", got, want)
+ }
+ if got, want := p.initialMaxStreamsUni, wantInitialMaxStreamsUni; got != want {
+		t.Errorf("initial_max_streams_uni = %v, want %v", got, want)
+ }
+}
diff --git a/internal/quic/congestion_reno.go b/quic/congestion_reno.go
similarity index 83%
rename from internal/quic/congestion_reno.go
rename to quic/congestion_reno.go
index 982cbf4bb..a53983524 100644
--- a/internal/quic/congestion_reno.go
+++ b/quic/congestion_reno.go
@@ -7,6 +7,8 @@
package quic
import (
+ "context"
+ "log/slog"
"math"
"time"
)
@@ -40,6 +42,9 @@ type ccReno struct {
// true if we haven't sent that packet yet.
sendOnePacketInRecovery bool
+ // inRecovery is set when we are in the recovery state.
+ inRecovery bool
+
// underutilized is set if the congestion window is underutilized
// due to insufficient application data, flow control limits, or
// anti-amplification limits.
@@ -100,12 +105,19 @@ func (c *ccReno) canSend() bool {
// congestion controller permits sending data, but no data is sent.
//
// https://www.rfc-editor.org/rfc/rfc9002#section-7.8
-func (c *ccReno) setUnderutilized(v bool) {
+func (c *ccReno) setUnderutilized(log *slog.Logger, v bool) {
+ if c.underutilized == v {
+ return
+ }
+ oldState := c.state()
c.underutilized = v
+ if logEnabled(log, QLogLevelPacket) {
+ logCongestionStateUpdated(log, oldState, c.state())
+ }
}
// packetSent indicates that a packet has been sent.
-func (c *ccReno) packetSent(now time.Time, space numberSpace, sent *sentPacket) {
+func (c *ccReno) packetSent(now time.Time, log *slog.Logger, space numberSpace, sent *sentPacket) {
if !sent.inFlight {
return
}
@@ -185,7 +197,11 @@ func (c *ccReno) packetLost(now time.Time, space numberSpace, sent *sentPacket,
}
// packetBatchEnd is called at the end of processing a batch of acked or lost packets.
-func (c *ccReno) packetBatchEnd(now time.Time, space numberSpace, rtt *rttState, maxAckDelay time.Duration) {
+func (c *ccReno) packetBatchEnd(now time.Time, log *slog.Logger, space numberSpace, rtt *rttState, maxAckDelay time.Duration) {
+ if logEnabled(log, QLogLevelPacket) {
+ oldState := c.state()
+ defer func() { logCongestionStateUpdated(log, oldState, c.state()) }()
+ }
if !c.ackLastLoss.IsZero() && !c.ackLastLoss.Before(c.recoveryStartTime) {
// Enter the recovery state.
// https://www.rfc-editor.org/rfc/rfc9002.html#section-7.3.2
@@ -196,8 +212,10 @@ func (c *ccReno) packetBatchEnd(now time.Time, space numberSpace, rtt *rttState,
// Clear congestionPendingAcks to avoid increasing the congestion
// window based on acks in a frame that sends us into recovery.
c.congestionPendingAcks = 0
+ c.inRecovery = true
} else if c.congestionPendingAcks > 0 {
// We are in slow start or congestion avoidance.
+ c.inRecovery = false
if c.congestionWindow < c.slowStartThreshold {
// When the congestion window is less than the slow start threshold,
// we are in slow start and increase the window by the number of
@@ -253,3 +271,38 @@ func (c *ccReno) minimumCongestionWindow() int {
// https://www.rfc-editor.org/rfc/rfc9002.html#section-7.2-4
return 2 * c.maxDatagramSize
}
+
+func logCongestionStateUpdated(log *slog.Logger, oldState, newState congestionState) {
+ if oldState == newState {
+ return
+ }
+ log.LogAttrs(context.Background(), QLogLevelPacket,
+ "recovery:congestion_state_updated",
+ slog.String("old", oldState.String()),
+ slog.String("new", newState.String()),
+ )
+}
+
+type congestionState string
+
+func (s congestionState) String() string { return string(s) }
+
+const (
+ congestionSlowStart = congestionState("slow_start")
+ congestionCongestionAvoidance = congestionState("congestion_avoidance")
+ congestionApplicationLimited = congestionState("application_limited")
+ congestionRecovery = congestionState("recovery")
+)
+
+func (c *ccReno) state() congestionState {
+ switch {
+ case c.inRecovery:
+ return congestionRecovery
+ case c.underutilized:
+ return congestionApplicationLimited
+ case c.congestionWindow < c.slowStartThreshold:
+ return congestionSlowStart
+ default:
+ return congestionCongestionAvoidance
+ }
+}
diff --git a/internal/quic/congestion_reno_test.go b/quic/congestion_reno_test.go
similarity index 99%
rename from internal/quic/congestion_reno_test.go
rename to quic/congestion_reno_test.go
index e9af6452c..cda7a90a8 100644
--- a/internal/quic/congestion_reno_test.go
+++ b/quic/congestion_reno_test.go
@@ -470,7 +470,7 @@ func (c *ccTest) setRTT(smoothedRTT, rttvar time.Duration) {
func (c *ccTest) setUnderutilized(v bool) {
c.t.Helper()
c.t.Logf("set underutilized = %v", v)
- c.cc.setUnderutilized(v)
+ c.cc.setUnderutilized(nil, v)
}
func (c *ccTest) packetSent(space numberSpace, size int, fns ...func(*sentPacket)) *sentPacket {
@@ -488,7 +488,7 @@ func (c *ccTest) packetSent(space numberSpace, size int, fns ...func(*sentPacket
f(sent)
}
c.t.Logf("packet sent: num=%v.%v, size=%v", space, sent.num, sent.size)
- c.cc.packetSent(c.now, space, sent)
+ c.cc.packetSent(c.now, nil, space, sent)
return sent
}
@@ -519,7 +519,7 @@ func (c *ccTest) packetDiscarded(space numberSpace, sent *sentPacket) {
func (c *ccTest) packetBatchEnd(space numberSpace) {
c.t.Helper()
c.t.Logf("(end of batch)")
- c.cc.packetBatchEnd(c.now, space, &c.rtt, c.maxAckDelay)
+ c.cc.packetBatchEnd(c.now, nil, space, &c.rtt, c.maxAckDelay)
}
func (c *ccTest) wantCanSend(want bool) {
diff --git a/quic/conn.go b/quic/conn.go
new file mode 100644
index 000000000..38e8fe8f4
--- /dev/null
+++ b/quic/conn.go
@@ -0,0 +1,456 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/netip"
+ "time"
+)
+
+// A Conn is a QUIC connection.
+//
+// Multiple goroutines may invoke methods on a Conn simultaneously.
+type Conn struct {
+ side connSide
+ endpoint *Endpoint
+ config *Config
+ testHooks connTestHooks
+ peerAddr netip.AddrPort
+ localAddr netip.AddrPort
+
+ msgc chan any
+ donec chan struct{} // closed when conn loop exits
+
+ w packetWriter
+ acks [numberSpaceCount]ackState // indexed by number space
+ lifetime lifetimeState
+ idle idleState
+ connIDState connIDState
+ loss lossState
+ streams streamsState
+ path pathState
+
+ // Packet protection keys, CRYPTO streams, and TLS state.
+ keysInitial fixedKeyPair
+ keysHandshake fixedKeyPair
+ keysAppData updatingKeyPair
+ crypto [numberSpaceCount]cryptoStream
+ tls *tls.QUICConn
+
+ // retryToken is the token provided by the peer in a Retry packet.
+ retryToken []byte
+
+ // handshakeConfirmed is set when the handshake is confirmed.
+ // For server connections, it tracks sending HANDSHAKE_DONE.
+ handshakeConfirmed sentVal
+
+ peerAckDelayExponent int8 // -1 when unknown
+
+ // Tests only: Send a PING in a specific number space.
+ testSendPingSpace numberSpace
+ testSendPing sentVal
+
+ log *slog.Logger
+}
+
+// connTestHooks override conn behavior in tests.
+type connTestHooks interface {
+ // init is called after a conn is created.
+ init()
+
+ // nextMessage is called to request the next event from msgc.
+ // Used to give tests control of the connection event loop.
+ nextMessage(msgc chan any, nextTimeout time.Time) (now time.Time, message any)
+
+ // handleTLSEvent is called with each TLS event.
+ handleTLSEvent(tls.QUICEvent)
+
+ // newConnID is called to generate a new connection ID.
+ // Permits tests to generate consistent connection IDs rather than random ones.
+ newConnID(seq int64) ([]byte, error)
+
+ // waitUntil blocks until the until func returns true or the context is done.
+ // Used to synchronize asynchronous blocking operations in tests.
+ waitUntil(ctx context.Context, until func() bool) error
+
+ // timeNow returns the current time.
+ timeNow() time.Time
+}
+
+// newServerConnIDs is connection IDs associated with a new server connection.
+type newServerConnIDs struct {
+ srcConnID []byte // source from client's current Initial
+ dstConnID []byte // destination from client's current Initial
+ originalDstConnID []byte // destination from client's first Initial
+ retrySrcConnID []byte // source from server's Retry
+}
+
+func newConn(now time.Time, side connSide, cids newServerConnIDs, peerHostname string, peerAddr netip.AddrPort, config *Config, e *Endpoint) (conn *Conn, _ error) {
+ c := &Conn{
+ side: side,
+ endpoint: e,
+ config: config,
+ peerAddr: unmapAddrPort(peerAddr),
+ msgc: make(chan any, 1),
+ donec: make(chan struct{}),
+ peerAckDelayExponent: -1,
+ }
+ defer func() {
+ // If we hit an error in newConn, close donec so tests don't get stuck waiting for it.
+ // This is only relevant if we've got a bug, but it makes tracking that bug down
+ // much easier.
+ if conn == nil {
+ close(c.donec)
+ }
+ }()
+
+ // A one-element buffer allows us to wake a Conn's event loop as a
+ // non-blocking operation.
+ c.msgc = make(chan any, 1)
+
+ if e.testHooks != nil {
+ e.testHooks.newConn(c)
+ }
+
+ // initialConnID is the connection ID used to generate Initial packet protection keys.
+ var initialConnID []byte
+ if c.side == clientSide {
+ if err := c.connIDState.initClient(c); err != nil {
+ return nil, err
+ }
+ initialConnID, _ = c.connIDState.dstConnID()
+ } else {
+ initialConnID = cids.originalDstConnID
+ if cids.retrySrcConnID != nil {
+ initialConnID = cids.retrySrcConnID
+ }
+ if err := c.connIDState.initServer(c, cids); err != nil {
+ return nil, err
+ }
+ }
+
+ // TODO: PMTU discovery.
+ c.logConnectionStarted(cids.originalDstConnID, peerAddr)
+ c.keysAppData.init()
+ c.loss.init(c.side, smallestMaxDatagramSize, now)
+ c.streamsInit()
+ c.lifetimeInit()
+ c.restartIdleTimer(now)
+
+ if err := c.startTLS(now, initialConnID, peerHostname, transportParameters{
+ initialSrcConnID: c.connIDState.srcConnID(),
+ originalDstConnID: cids.originalDstConnID,
+ retrySrcConnID: cids.retrySrcConnID,
+ ackDelayExponent: ackDelayExponent,
+ maxUDPPayloadSize: maxUDPPayloadSize,
+ maxAckDelay: maxAckDelay,
+ disableActiveMigration: true,
+ initialMaxData: config.maxConnReadBufferSize(),
+ initialMaxStreamDataBidiLocal: config.maxStreamReadBufferSize(),
+ initialMaxStreamDataBidiRemote: config.maxStreamReadBufferSize(),
+ initialMaxStreamDataUni: config.maxStreamReadBufferSize(),
+ initialMaxStreamsBidi: c.streams.remoteLimit[bidiStream].max,
+ initialMaxStreamsUni: c.streams.remoteLimit[uniStream].max,
+ activeConnIDLimit: activeConnIDLimit,
+ }); err != nil {
+ return nil, err
+ }
+
+ if c.testHooks != nil {
+ c.testHooks.init()
+ }
+ go c.loop(now)
+ return c, nil
+}
+
+func (c *Conn) String() string {
+ return fmt.Sprintf("quic.Conn(%v,->%v)", c.side, c.peerAddr)
+}
+
+// confirmHandshake is called when the handshake is confirmed.
+// https://www.rfc-editor.org/rfc/rfc9001#section-4.1.2
+func (c *Conn) confirmHandshake(now time.Time) {
+ // If handshakeConfirmed is unset, the handshake is not confirmed.
+ // If it is unsent, the handshake is confirmed and we need to send a HANDSHAKE_DONE.
+ // If it is sent, we have sent a HANDSHAKE_DONE.
+ // If it is received, the handshake is confirmed and we do not need to send anything.
+ if c.handshakeConfirmed.isSet() {
+ return // already confirmed
+ }
+ if c.side == serverSide {
+ // When the server confirms the handshake, it sends a HANDSHAKE_DONE.
+ c.handshakeConfirmed.setUnsent()
+ c.endpoint.serverConnEstablished(c)
+ } else {
+ // The client never sends a HANDSHAKE_DONE, so we set handshakeConfirmed
+ // to the received state, indicating that the handshake is confirmed and we
+ // don't need to send anything.
+ c.handshakeConfirmed.setReceived()
+ }
+ c.restartIdleTimer(now)
+ c.loss.confirmHandshake()
+ // "An endpoint MUST discard its Handshake keys when the TLS handshake is confirmed"
+ // https://www.rfc-editor.org/rfc/rfc9001#section-4.9.2-1
+ c.discardKeys(now, handshakeSpace)
+}
+
+// discardKeys discards unused packet protection keys.
+// https://www.rfc-editor.org/rfc/rfc9001#section-4.9
+func (c *Conn) discardKeys(now time.Time, space numberSpace) {
+ switch space {
+ case initialSpace:
+ c.keysInitial.discard()
+ case handshakeSpace:
+ c.keysHandshake.discard()
+ }
+ c.loss.discardKeys(now, c.log, space)
+}
+
+// receiveTransportParameters applies transport parameters sent by the peer.
+func (c *Conn) receiveTransportParameters(p transportParameters) error {
+ isRetry := c.retryToken != nil
+ if err := c.connIDState.validateTransportParameters(c, isRetry, p); err != nil {
+ return err
+ }
+ c.streams.outflow.setMaxData(p.initialMaxData)
+ c.streams.localLimit[bidiStream].setMax(p.initialMaxStreamsBidi)
+ c.streams.localLimit[uniStream].setMax(p.initialMaxStreamsUni)
+ c.streams.peerInitialMaxStreamDataBidiLocal = p.initialMaxStreamDataBidiLocal
+ c.streams.peerInitialMaxStreamDataRemote[bidiStream] = p.initialMaxStreamDataBidiRemote
+ c.streams.peerInitialMaxStreamDataRemote[uniStream] = p.initialMaxStreamDataUni
+ c.receivePeerMaxIdleTimeout(p.maxIdleTimeout)
+ c.peerAckDelayExponent = p.ackDelayExponent
+ c.loss.setMaxAckDelay(p.maxAckDelay)
+ if err := c.connIDState.setPeerActiveConnIDLimit(c, p.activeConnIDLimit); err != nil {
+ return err
+ }
+ if p.preferredAddrConnID != nil {
+ var (
+ seq int64 = 1 // sequence number of this conn id is 1
+ retirePriorTo int64 = 0 // retire nothing
+ resetToken [16]byte
+ )
+ copy(resetToken[:], p.preferredAddrResetToken)
+ if err := c.connIDState.handleNewConnID(c, seq, retirePriorTo, p.preferredAddrConnID, resetToken); err != nil {
+ return err
+ }
+ }
+ // TODO: stateless_reset_token
+ // TODO: max_udp_payload_size
+ // TODO: disable_active_migration
+ // TODO: preferred_address
+ return nil
+}
+
+type (
+ timerEvent struct{}
+ wakeEvent struct{}
+)
+
+var errIdleTimeout = errors.New("idle timeout")
+
+// loop is the connection main loop.
+//
+// Except where otherwise noted, all connection state is owned by the loop goroutine.
+//
+// The loop processes messages from c.msgc and timer events.
+// Other goroutines may examine or modify conn state by sending the loop funcs to execute.
+func (c *Conn) loop(now time.Time) {
+ defer c.cleanup()
+
+ // The connection timer sends a message to the connection loop on expiry.
+ // We need to give it an expiry when creating it, so set the initial timeout to
+ // an arbitrary large value. The timer will be reset before this expires (and it
+ // isn't a problem if it does anyway). Skip creating the timer in tests which
+ // take control of the connection message loop.
+ var timer *time.Timer
+ var lastTimeout time.Time
+ hooks := c.testHooks
+ if hooks == nil {
+ timer = time.AfterFunc(1*time.Hour, func() {
+ c.sendMsg(timerEvent{})
+ })
+ defer timer.Stop()
+ }
+
+ for c.lifetime.state != connStateDone {
+ sendTimeout := c.maybeSend(now) // try sending
+
+ // Note that we only need to consider the ack timer for the App Data space,
+ // since the Initial and Handshake spaces always ack immediately.
+ nextTimeout := sendTimeout
+ nextTimeout = firstTime(nextTimeout, c.idle.nextTimeout)
+ if c.isAlive() {
+ nextTimeout = firstTime(nextTimeout, c.loss.timer)
+ nextTimeout = firstTime(nextTimeout, c.acks[appDataSpace].nextAck)
+ } else {
+ nextTimeout = firstTime(nextTimeout, c.lifetime.drainEndTime)
+ }
+
+ var m any
+ if hooks != nil {
+ // Tests only: Wait for the test to tell us to continue.
+ now, m = hooks.nextMessage(c.msgc, nextTimeout)
+ } else if !nextTimeout.IsZero() && nextTimeout.Before(now) {
+ // A connection timer has expired.
+ now = time.Now()
+ m = timerEvent{}
+ } else {
+ // Reschedule the connection timer if necessary
+ // and wait for the next event.
+ if !nextTimeout.Equal(lastTimeout) && !nextTimeout.IsZero() {
+ // Resetting a timer created with time.AfterFunc guarantees
+ // that the timer will run again. We might generate a spurious
+ // timer event under some circumstances, but that's okay.
+ timer.Reset(nextTimeout.Sub(now))
+ lastTimeout = nextTimeout
+ }
+ m = <-c.msgc
+ now = time.Now()
+ }
+ switch m := m.(type) {
+ case *datagram:
+ if !c.handleDatagram(now, m) {
+ if c.logEnabled(QLogLevelPacket) {
+ c.logPacketDropped(m)
+ }
+ }
+ m.recycle()
+ case timerEvent:
+ // A connection timer has expired.
+ if c.idleAdvance(now) {
+ // The connection idle timer has expired.
+ c.abortImmediately(now, errIdleTimeout)
+ return
+ }
+ c.loss.advance(now, c.handleAckOrLoss)
+ if c.lifetimeAdvance(now) {
+ // The connection has completed the draining period,
+ // and may be shut down.
+ return
+ }
+ case wakeEvent:
+ // We're being woken up to try sending some frames.
+ case func(time.Time, *Conn):
+ // Send a func to msgc to run it on the main Conn goroutine
+ m(now, c)
+ default:
+ panic(fmt.Sprintf("quic: unrecognized conn message %T", m))
+ }
+ }
+}
+
+func (c *Conn) cleanup() {
+ c.logConnectionClosed()
+ c.endpoint.connDrained(c)
+ c.tls.Close()
+ close(c.donec)
+}
+
+// sendMsg sends a message to the conn's loop.
+// It does not wait for the message to be processed.
+// The conn may close before processing the message, in which case it is lost.
+func (c *Conn) sendMsg(m any) {
+ select {
+ case c.msgc <- m:
+ case <-c.donec:
+ }
+}
+
+// wake wakes up the conn's loop.
+func (c *Conn) wake() {
+ select {
+ case c.msgc <- wakeEvent{}:
+ default:
+ }
+}
+
+// runOnLoop executes a function within the conn's loop goroutine.
+func (c *Conn) runOnLoop(ctx context.Context, f func(now time.Time, c *Conn)) error {
+ donec := make(chan struct{})
+ msg := func(now time.Time, c *Conn) {
+ defer close(donec)
+ f(now, c)
+ }
+ if c.testHooks != nil {
+ // In tests, we can't rely on being able to send a message immediately:
+ // c.msgc might be full, and testConnHooks.nextMessage might be waiting
+ // for us to block before it processes the next message.
+ // To avoid a deadlock, we send the message in waitUntil.
+ // If msgc is empty, the message is buffered.
+ // If msgc is full, we block and let nextMessage process the queue.
+ msgc := c.msgc
+ c.testHooks.waitUntil(ctx, func() bool {
+ for {
+ select {
+ case msgc <- msg:
+ msgc = nil // send msg only once
+ case <-donec:
+ return true
+ case <-c.donec:
+ return true
+ default:
+ return false
+ }
+ }
+ })
+ } else {
+ c.sendMsg(msg)
+ }
+ select {
+ case <-donec:
+ case <-c.donec:
+ return errors.New("quic: connection closed")
+ }
+ return nil
+}
+
+func (c *Conn) waitOnDone(ctx context.Context, ch <-chan struct{}) error {
+ if c.testHooks != nil {
+ return c.testHooks.waitUntil(ctx, func() bool {
+ select {
+ case <-ch:
+ return true
+ default:
+ }
+ return false
+ })
+ }
+ // Check the channel before the context.
+ // We always prefer to return results when available,
+ // even when provided with an already-canceled context.
+ select {
+ case <-ch:
+ return nil
+ default:
+ }
+ select {
+ case <-ch:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return nil
+}
+
+// firstTime returns the earliest non-zero time, or zero if both times are zero.
+func firstTime(a, b time.Time) time.Time {
+ switch {
+ case a.IsZero():
+ return b
+ case b.IsZero():
+ return a
+ case a.Before(b):
+ return a
+ default:
+ return b
+ }
+}
diff --git a/quic/conn_async_test.go b/quic/conn_async_test.go
new file mode 100644
index 000000000..4671f8340
--- /dev/null
+++ b/quic/conn_async_test.go
@@ -0,0 +1,187 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "sync"
+)
+
// asyncTestState permits handling asynchronous operations in a synchronous test.
//
// For example, a test may want to write to a stream and observe that
// STREAM frames are sent with the contents of the write in response
// to MAX_STREAM_DATA frames received from the peer.
// The Stream.Write is an asynchronous operation, but the test is simpler
// if we can start the write, observe the first STREAM frame sent,
// send a MAX_STREAM_DATA frame, observe the next STREAM frame sent, etc.
//
// We do this by instrumenting points where operations can block.
// We start async operations like Write in a goroutine,
// and wait for the operation to either finish or hit a blocking point.
// When the connection event loop is idle, we check a list of
// blocked operations to see if any can be woken.
type asyncTestState struct {
	mu      sync.Mutex
	notify  chan struct{}              // unbuffered; synchronizes op goroutines with the test
	blocked map[*blockedAsync]struct{} // operations currently parked in waitUntil; guarded by mu
}
+
// An asyncOp is an asynchronous operation that results in (T, error).
type asyncOp[T any] struct {
	v   T     // result value; valid only after donec is closed
	err error // result error; valid only after donec is closed

	caller     string             // file:line of the runAsync call, for diagnostics
	tc         *testConn          // connection the operation runs against
	donec      chan struct{}      // closed when the operation completes
	cancelFunc context.CancelFunc // cancels the operation's context
}
+
// cancel cancels the async operation's context, and waits for
// the operation to complete.
//
// It panics if the operation does not finish after cancellation,
// since that indicates a blocked goroutine that ignores its context.
func (a *asyncOp[T]) cancel() {
	select {
	case <-a.donec:
		return // already done
	default:
	}
	a.cancelFunc()
	// Wait for the op goroutine to signal completion on the shared notify channel.
	<-a.tc.asyncTestState.notify
	select {
	case <-a.donec:
	default:
		panic(fmt.Errorf("%v: async op failed to finish after being canceled", a.caller))
	}
}
+
// errNotDone is returned by asyncOp.result while the operation is still in progress.
var errNotDone = errors.New("async op is not done")
+
// result returns the result of the async operation.
// It returns errNotDone if the operation is still in progress.
//
// Note that unlike a traditional async/await, this doesn't block
// waiting for the operation to complete. Since tests have full
// control over the progress of operations, an asyncOp can only
// become done in reaction to the test taking some action.
func (a *asyncOp[T]) result() (v T, err error) {
	// Let the conn process any pending events before checking for completion.
	a.tc.wait()
	select {
	case <-a.donec:
		return a.v, a.err
	default:
		return v, errNotDone
	}
}
+
// A blockedAsync is a blocked async operation.
type blockedAsync struct {
	until func() bool   // when this returns true, the operation is unblocked
	donec chan struct{} // closed when the operation is unblocked
}
+
// asyncContextKey marks contexts created by runAsync, so waitUntil can
// detect blocking operations started outside the async test harness.
type asyncContextKey struct{}
+
// runAsync starts an asynchronous operation.
//
// The function f should call a blocking function such as
// Stream.Write or Conn.AcceptStream and return its result.
// It must use the provided context.
//
// runAsync returns once f has either completed or reached a
// blocking point instrumented by asyncTestState.waitUntil.
func runAsync[T any](tc *testConn, f func(context.Context) (T, error)) *asyncOp[T] {
	as := &tc.asyncTestState
	if as.notify == nil {
		// Lazy one-time initialization of the per-conn async state.
		as.notify = make(chan struct{})
		as.mu.Lock()
		as.blocked = make(map[*blockedAsync]struct{})
		as.mu.Unlock()
	}
	_, file, line, _ := runtime.Caller(1)
	// Mark the context so waitUntil knows it came from the harness.
	ctx := context.WithValue(context.Background(), asyncContextKey{}, true)
	ctx, cancel := context.WithCancel(ctx)
	a := &asyncOp[T]{
		tc:         tc,
		caller:     fmt.Sprintf("%v:%v", filepath.Base(file), line),
		donec:      make(chan struct{}),
		cancelFunc: cancel,
	}
	go func() {
		a.v, a.err = f(ctx)
		close(a.donec)
		// Signal completion; pairs with the receives in runAsync/cancel/wakeAsync.
		as.notify <- struct{}{}
	}()
	tc.t.Cleanup(func() {
		if _, err := a.result(); err == errNotDone {
			tc.t.Errorf("%v: async operation is still executing at end of test", a.caller)
			a.cancel()
		}
	})
	// Wait for the operation to either finish or block.
	<-as.notify
	tc.wait()
	return a
}
+
// waitUntil waits for a blocked async operation to complete.
// The operation is complete when the until func returns true.
//
// It is called from within an operation started by runAsync, on the
// operation's own goroutine.
func (as *asyncTestState) waitUntil(ctx context.Context, until func() bool) error {
	if until() {
		return nil
	}
	if err := ctx.Err(); err != nil {
		// Context has already expired.
		return err
	}
	if ctx.Value(asyncContextKey{}) == nil {
		// Context is not one that we've created, and hasn't expired.
		// This probably indicates that we've tried to perform a
		// blocking operation without using the async test harness here,
		// which may have unpredictable results.
		panic("blocking async point with unexpected Context")
	}
	b := &blockedAsync{
		until: until,
		donec: make(chan struct{}),
	}
	// Record this as a pending blocking operation.
	as.mu.Lock()
	as.blocked[b] = struct{}{}
	as.mu.Unlock()
	// Notify the creator of the operation that we're blocked,
	// and wait to be woken up.
	as.notify <- struct{}{}
	select {
	case <-b.donec:
	case <-ctx.Done():
		return ctx.Err()
	}
	return nil
}
+
// wakeAsync tries to wake up a blocked async operation.
// It returns true if one was woken, false otherwise.
//
// At most one operation is woken per call, so the test observes
// one state transition at a time.
func (as *asyncTestState) wakeAsync() bool {
	as.mu.Lock()
	var woken *blockedAsync
	for w := range as.blocked {
		if w.until() {
			woken = w
			delete(as.blocked, w)
			break
		}
	}
	as.mu.Unlock()
	if woken == nil {
		return false
	}
	close(woken.donec)
	// Wait for the woken op to finish or re-block.
	<-as.notify // must not hold as.mu while blocked here
	return true
}
diff --git a/quic/conn_close.go b/quic/conn_close.go
new file mode 100644
index 000000000..1798d0536
--- /dev/null
+++ b/quic/conn_close.go
@@ -0,0 +1,331 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "errors"
+ "time"
+)
+
// connState is the state of a connection.
//
// States are ordered by the connection's lifecycle:
// alive, then (optionally) peer-closed/closing/draining, then done.
type connState int

const (
	// A connection is alive when it is first created.
	connStateAlive = connState(iota)

	// The connection has received a CONNECTION_CLOSE frame from the peer,
	// and has not yet sent a CONNECTION_CLOSE in response.
	//
	// We will send a CONNECTION_CLOSE, and then enter the draining state.
	connStatePeerClosed

	// The connection is in the closing state.
	//
	// We will send CONNECTION_CLOSE frames to the peer
	// (once upon entering the closing state, and possibly again in response to peer packets).
	//
	// If we receive a CONNECTION_CLOSE from the peer, we will enter the draining state.
	// Otherwise, we will eventually time out and move to the done state.
	//
	// https://www.rfc-editor.org/rfc/rfc9000#section-10.2.1
	connStateClosing

	// The connection is in the draining state.
	//
	// We will neither send packets nor process received packets.
	// When the drain timer expires, we move to the done state.
	//
	// https://www.rfc-editor.org/rfc/rfc9000#section-10.2.2
	connStateDraining

	// The connection is done, and the conn loop will exit.
	connStateDone
)
+
// lifetimeState tracks the state of a connection.
//
// This is fairly coupled to the rest of a Conn, but putting it in a struct of its own helps
// reason about operations that cause state transitions.
type lifetimeState struct {
	state connState // current lifecycle state; see connState constants

	readyc chan struct{} // closed when TLS handshake completes
	donec  chan struct{} // closed when finalErr is set

	localErr error // error sent to the peer
	finalErr error // error sent by the peer, or transport error; set before closing donec

	connCloseSentTime time.Time     // send time of last CONNECTION_CLOSE frame
	connCloseDelay    time.Duration // delay until next CONNECTION_CLOSE frame sent
	drainEndTime      time.Time     // time the connection exits the draining state
}
+
// lifetimeInit initializes the conn's lifetime signaling channels.
// It must be called once, before the conn is used.
func (c *Conn) lifetimeInit() {
	c.lifetime.readyc = make(chan struct{})
	c.lifetime.donec = make(chan struct{})
}
+
var (
	// errNoPeerResponse is the final error when the peer never acknowledges
	// our CONNECTION_CLOSE before the drain period expires.
	errNoPeerResponse = errors.New("peer did not respond to CONNECTION_CLOSE")
	// errConnClosed is the generic final error for a locally terminated connection.
	errConnClosed = errors.New("connection closed")
)
+
// lifetimeAdvance is called when time passes.
// It reports whether the connection has reached its final (done) state.
func (c *Conn) lifetimeAdvance(now time.Time) (done bool) {
	if c.lifetime.drainEndTime.IsZero() || c.lifetime.drainEndTime.After(now) {
		// No drain period in progress, or it hasn't expired yet.
		return false
	}
	// The connection drain period has ended, and we can shut down.
	// https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2-7
	c.lifetime.drainEndTime = time.Time{}
	if c.lifetime.state != connStateDraining {
		// We were in the closing state, waiting for a CONNECTION_CLOSE from the peer.
		c.setFinalError(errNoPeerResponse)
	}
	c.setState(now, connStateDone)
	return true
}
+
// setState sets the conn state, performing any side effects of the transition
// (starting the drain timer, recording the final error, cleaning up streams).
func (c *Conn) setState(now time.Time, state connState) {
	if c.lifetime.state == state {
		return
	}
	c.lifetime.state = state
	switch state {
	case connStateClosing, connStateDraining:
		if c.lifetime.drainEndTime.IsZero() {
			// Drain for three PTO periods; set only once so re-entering
			// closing/draining doesn't extend the deadline.
			c.lifetime.drainEndTime = now.Add(3 * c.loss.ptoBasePeriod())
		}
	case connStateDone:
		c.setFinalError(nil)
	}
	if state != connStateAlive {
		c.streamsCleanup()
	}
}
+
// handshakeDone is called when the TLS handshake completes.
// It unblocks anyone waiting in waitReady.
func (c *Conn) handshakeDone() {
	close(c.lifetime.readyc)
}
+
+// isDraining reports whether the conn is in the draining state.
+//
+// The draining state is entered once an endpoint receives a CONNECTION_CLOSE frame.
+// The endpoint will no longer send any packets, but we retain knowledge of the connection
+// until the end of the drain period to ensure we discard packets for the connection
+// rather than treating them as starting a new connection.
+//
+// https://www.rfc-editor.org/rfc/rfc9000.html#section-10.2.2
+func (c *Conn) isDraining() bool {
+ switch c.lifetime.state {
+ case connStateDraining, connStateDone:
+ return true
+ }
+ return false
+}
+
// isAlive reports whether the conn is handling packets,
// i.e. no close has started in either direction.
func (c *Conn) isAlive() bool {
	return c.lifetime.state == connStateAlive
}
+
// sendOK reports whether the conn can send frames at this time.
//
// In the closing state it implements the CONNECTION_CLOSE rate limit:
// after sending one, further sends are suppressed until the peer sends
// a packet after the current backoff delay.
func (c *Conn) sendOK(now time.Time) bool {
	switch c.lifetime.state {
	case connStateAlive:
		return true
	case connStatePeerClosed:
		if c.lifetime.localErr == nil {
			// We're waiting for the user to close the connection, providing us with
			// a final status to send to the peer.
			return false
		}
		// We should send a CONNECTION_CLOSE.
		return true
	case connStateClosing:
		if c.lifetime.connCloseSentTime.IsZero() {
			// Haven't sent our CONNECTION_CLOSE yet; do so now.
			return true
		}
		// Find the most recent packet receipt time across all number spaces.
		maxRecvTime := c.acks[initialSpace].maxRecvTime
		if t := c.acks[handshakeSpace].maxRecvTime; t.After(maxRecvTime) {
			maxRecvTime = t
		}
		if t := c.acks[appDataSpace].maxRecvTime; t.After(maxRecvTime) {
			maxRecvTime = t
		}
		if maxRecvTime.Before(c.lifetime.connCloseSentTime.Add(c.lifetime.connCloseDelay)) {
			// After sending CONNECTION_CLOSE, ignore packets from the peer for
			// a delay. On the next packet received after the delay, send another
			// CONNECTION_CLOSE.
			return false
		}
		return true
	case connStateDraining:
		// We are in the draining state, and will send no more packets.
		return false
	case connStateDone:
		return false
	default:
		panic("BUG: unhandled connection state")
	}
}
+
// sentConnectionClose reports that the conn has sent a CONNECTION_CLOSE to the peer.
// It updates the rate-limit backoff used by sendOK.
func (c *Conn) sentConnectionClose(now time.Time) {
	switch c.lifetime.state {
	case connStatePeerClosed:
		// The peer closed first; our CONNECTION_CLOSE is the reply,
		// so move straight to draining.
		c.enterDraining(now)
	}
	if c.lifetime.connCloseSentTime.IsZero() {
		// Set the initial delay before we will send another CONNECTION_CLOSE.
		//
		// RFC 9000 states that we should rate limit CONNECTION_CLOSE frames,
		// but leaves the implementation of the limit up to us. Here, we start
		// with the same delay as the PTO timer (RFC 9002, Section 6.2.1),
		// not including max_ack_delay, and double it on every CONNECTION_CLOSE sent.
		c.lifetime.connCloseDelay = c.loss.rtt.smoothedRTT + max(4*c.loss.rtt.rttvar, timerGranularity)
	} else if !c.lifetime.connCloseSentTime.Equal(now) {
		// If connCloseSentTime == now, we're sending two CONNECTION_CLOSE frames
		// coalesced into the same datagram. We only want to increase the delay once.
		c.lifetime.connCloseDelay *= 2
	}
	c.lifetime.connCloseSentTime = now
}
+
// handlePeerConnectionClose handles a CONNECTION_CLOSE from the peer.
// err is the error derived from the frame, recorded as the final error
// unless one has already been set.
func (c *Conn) handlePeerConnectionClose(now time.Time, err error) {
	c.setFinalError(err)
	switch c.lifetime.state {
	case connStateAlive:
		// Wait for the user to close before replying with CONNECTION_CLOSE.
		c.setState(now, connStatePeerClosed)
	case connStatePeerClosed:
		// Duplicate CONNECTION_CLOSE, ignore.
	case connStateClosing:
		if c.lifetime.connCloseSentTime.IsZero() {
			// We haven't sent our CONNECTION_CLOSE yet; send it before draining.
			c.setState(now, connStatePeerClosed)
		} else {
			// Both sides have sent CONNECTION_CLOSE; nothing left to say.
			c.setState(now, connStateDraining)
		}
	case connStateDraining:
	case connStateDone:
	}
}
+
// setFinalError records the final connection status we report to the user.
// Only the first call has any effect; donec is closed exactly once.
func (c *Conn) setFinalError(err error) {
	select {
	case <-c.lifetime.donec:
		return // already set
	default:
	}
	c.lifetime.finalErr = err
	close(c.lifetime.donec)
}
+
// waitReady blocks until the handshake completes, the connection is
// terminated, or ctx is canceled. Readiness and termination are checked
// before the context, so an already-available result wins over an
// already-canceled context.
func (c *Conn) waitReady(ctx context.Context) error {
	select {
	case <-c.lifetime.readyc:
		return nil
	case <-c.lifetime.donec:
		return c.lifetime.finalErr
	default:
	}
	select {
	case <-c.lifetime.readyc:
		return nil
	case <-c.lifetime.donec:
		return c.lifetime.finalErr
	case <-ctx.Done():
		return ctx.Err()
	}
}
+
// Close closes the connection.
//
// Close is equivalent to:
//
//	conn.Abort(nil)
//	err := conn.Wait(context.Background())
//
// It blocks (without a context) until the connection's final error is set.
func (c *Conn) Close() error {
	c.Abort(nil)
	<-c.lifetime.donec
	return c.lifetime.finalErr
}
+
// Wait waits for the peer to close the connection.
//
// If the connection is closed locally and the peer does not close its end of the connection,
// Wait will return with a non-nil error after the drain period expires.
//
// If the peer closes the connection with a NO_ERROR transport error, Wait returns nil.
// If the peer closes the connection with an application error, Wait returns an ApplicationError
// containing the peer's error code and reason.
// If the peer closes the connection with any other status, Wait returns a non-nil error.
func (c *Conn) Wait(ctx context.Context) error {
	if err := c.waitOnDone(ctx, c.lifetime.donec); err != nil {
		// ctx was canceled before the connection finished.
		return err
	}
	return c.lifetime.finalErr
}
+
// Abort closes the connection and returns immediately.
//
// If err is nil, Abort sends a transport error of NO_ERROR to the peer.
// If err is an ApplicationError, Abort sends its error code and text.
// Otherwise, Abort sends a transport error of APPLICATION_ERROR with the error's text.
func (c *Conn) Abort(err error) {
	if err == nil {
		err = localTransportError{code: errNo}
	}
	// The close itself happens on the conn's loop goroutine.
	c.sendMsg(func(now time.Time, c *Conn) {
		c.enterClosing(now, err)
	})
}
+
// abort terminates a connection with an error.
// It must be called on the conn's loop goroutine.
func (c *Conn) abort(now time.Time, err error) {
	c.setFinalError(err) // this error takes precedence over the peer's CONNECTION_CLOSE
	c.enterClosing(now, err)
}
+
// abortImmediately terminates a connection.
// The connection does not send a CONNECTION_CLOSE, and skips the draining period.
// It must be called on the conn's loop goroutine.
func (c *Conn) abortImmediately(now time.Time, err error) {
	c.setFinalError(err)
	c.setState(now, connStateDone)
}
+
// enterClosing starts an immediate close.
// We will send a CONNECTION_CLOSE to the peer and wait for their response.
//
// In states other than alive/peer-closed this is a no-op: the close is
// already underway and the first local error wins.
func (c *Conn) enterClosing(now time.Time, err error) {
	switch c.lifetime.state {
	case connStateAlive:
		c.lifetime.localErr = err
		c.setState(now, connStateClosing)
	case connStatePeerClosed:
		// The peer closed first; recording localErr lets sendOK
		// release our CONNECTION_CLOSE reply.
		c.lifetime.localErr = err
	}
}
+
// enterDraining moves directly to the draining state, without sending a CONNECTION_CLOSE.
// It is a no-op once the conn is already draining or done.
func (c *Conn) enterDraining(now time.Time) {
	switch c.lifetime.state {
	case connStateAlive, connStatePeerClosed, connStateClosing:
		c.setState(now, connStateDraining)
	}
}
+
+// exit fully terminates a connection immediately.
+func (c *Conn) exit() {
+ c.sendMsg(func(now time.Time, c *Conn) {
+ c.abortImmediately(now, errors.New("connection closed"))
+ })
+}
diff --git a/quic/conn_close_test.go b/quic/conn_close_test.go
new file mode 100644
index 000000000..213975011
--- /dev/null
+++ b/quic/conn_close_test.go
@@ -0,0 +1,282 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "testing"
+ "time"
+)
+
// TestConnCloseResponseBackoff verifies that after sending CONNECTION_CLOSE,
// the conn rate-limits further CONNECTION_CLOSE responses to peer packets,
// doubling the backoff each time, and finally reports errNoPeerResponse
// once the drain timer expires without a reply.
func TestConnCloseResponseBackoff(t *testing.T) {
	tc := newTestConn(t, clientSide, func(c *Config) {
		clear(c.StatelessResetKey[:])
	})
	tc.handshake()

	tc.conn.Abort(nil)
	tc.wantFrame("aborting connection generates CONN_CLOSE",
		packetType1RTT, debugFrameConnectionCloseTransport{
			code: errNo,
		})

	waiting := runAsync(tc, func(ctx context.Context) (struct{}, error) {
		return struct{}{}, tc.conn.Wait(ctx)
	})
	if _, err := waiting.result(); err != errNotDone {
		t.Errorf("conn.Wait() = %v, want still waiting", err)
	}

	tc.writeFrames(packetType1RTT, debugFramePing{})
	tc.wantIdle("packets received immediately after CONN_CLOSE receive no response")

	tc.advance(1100 * time.Microsecond)
	tc.writeFrames(packetType1RTT, debugFramePing{})
	tc.wantFrame("receiving packet 1.1ms after CONN_CLOSE generates another CONN_CLOSE",
		packetType1RTT, debugFrameConnectionCloseTransport{
			code: errNo,
		})

	tc.advance(1100 * time.Microsecond)
	tc.writeFrames(packetType1RTT, debugFramePing{})
	tc.wantIdle("no response to packet, because CONN_CLOSE backoff is now 2ms")

	tc.advance(1000 * time.Microsecond)
	tc.writeFrames(packetType1RTT, debugFramePing{})
	tc.wantFrame("2ms since last CONN_CLOSE, receiving a packet generates another CONN_CLOSE",
		packetType1RTT, debugFrameConnectionCloseTransport{
			code: errNo,
		})
	if _, err := waiting.result(); err != errNotDone {
		t.Errorf("conn.Wait() = %v, want still waiting", err)
	}

	// Advance past the drain period; the conn gives up on the peer.
	tc.advance(100000 * time.Microsecond)
	tc.writeFrames(packetType1RTT, debugFramePing{})
	tc.wantIdle("drain timer expired, no more responses")

	if _, err := waiting.result(); !errors.Is(err, errNoPeerResponse) {
		t.Errorf("blocked conn.Wait() = %v, want errNoPeerResponse", err)
	}
	if err := tc.conn.Wait(canceledContext()); !errors.Is(err, errNoPeerResponse) {
		t.Errorf("non-blocking conn.Wait() = %v, want errNoPeerResponse", err)
	}
}
+
// TestConnCloseWithPeerResponse verifies that a locally initiated close
// completes with the peer's ApplicationError once the peer replies with
// its own CONNECTION_CLOSE, and that the close is recorded in qlog.
func TestConnCloseWithPeerResponse(t *testing.T) {
	qr := &qlogRecord{}
	tc := newTestConn(t, clientSide, qr.config)
	tc.handshake()

	tc.conn.Abort(nil)
	tc.wantFrame("aborting connection generates CONN_CLOSE",
		packetType1RTT, debugFrameConnectionCloseTransport{
			code: errNo,
		})

	waiting := runAsync(tc, func(ctx context.Context) (struct{}, error) {
		return struct{}{}, tc.conn.Wait(ctx)
	})
	if _, err := waiting.result(); err != errNotDone {
		t.Errorf("conn.Wait() = %v, want still waiting", err)
	}

	// Peer responds with its own CONNECTION_CLOSE.
	tc.writeFrames(packetType1RTT, debugFrameConnectionCloseApplication{
		code: 20,
	})

	wantErr := &ApplicationError{
		Code: 20,
	}
	if _, err := waiting.result(); !errors.Is(err, wantErr) {
		t.Errorf("blocked conn.Wait() = %v, want %v", err, wantErr)
	}
	if err := tc.conn.Wait(canceledContext()); !errors.Is(err, wantErr) {
		t.Errorf("non-blocking conn.Wait() = %v, want %v", err, wantErr)
	}

	tc.advance(1 * time.Second) // long enough to exit the draining state
	qr.wantEvents(t, jsonEvent{
		"name": "connectivity:connection_closed",
		"data": map[string]any{
			"trigger": "application",
		},
	})
}
+
// TestConnClosePeerCloses verifies that when the peer closes first,
// the conn reports the peer's error immediately but withholds its own
// CONNECTION_CLOSE reply until the user closes locally.
func TestConnClosePeerCloses(t *testing.T) {
	qr := &qlogRecord{}
	tc := newTestConn(t, clientSide, qr.config)
	tc.handshake()

	wantErr := &ApplicationError{
		Code:   42,
		Reason: "why?",
	}
	tc.writeFrames(packetType1RTT, debugFrameConnectionCloseApplication{
		code:   wantErr.Code,
		reason: wantErr.Reason,
	})
	tc.wantIdle("CONN_CLOSE response not sent until user closes this side")

	if err := tc.conn.Wait(canceledContext()); !errors.Is(err, wantErr) {
		t.Errorf("conn.Wait() = %v, want %v", err, wantErr)
	}

	tc.conn.Abort(&ApplicationError{
		Code:   9,
		Reason: "because",
	})
	tc.wantFrame("CONN_CLOSE sent after user closes connection",
		packetType1RTT, debugFrameConnectionCloseApplication{
			code:   9,
			reason: "because",
		})

	tc.advance(1 * time.Second) // long enough to exit the draining state
	qr.wantEvents(t, jsonEvent{
		"name": "connectivity:connection_closed",
		"data": map[string]any{
			"trigger": "application",
		},
	})
}
+
// TestConnCloseReceiveInInitial verifies handling of a CONNECTION_CLOSE
// received during the Initial packet number space: the peer's transport
// error is reported, and our application close is demoted to an
// APPLICATION_ERROR transport error in the Initial space.
func TestConnCloseReceiveInInitial(t *testing.T) {
	tc := newTestConn(t, clientSide)
	tc.wantFrame("client sends Initial CRYPTO frame",
		packetTypeInitial, debugFrameCrypto{
			data: tc.cryptoDataOut[tls.QUICEncryptionLevelInitial],
		})
	tc.writeFrames(packetTypeInitial, debugFrameConnectionCloseTransport{
		code: errConnectionRefused,
	})
	tc.wantIdle("CONN_CLOSE response not sent until user closes this side")

	wantErr := peerTransportError{code: errConnectionRefused}
	if err := tc.conn.Wait(canceledContext()); !errors.Is(err, wantErr) {
		t.Errorf("conn.Wait() = %v, want %v", err, wantErr)
	}

	tc.conn.Abort(&ApplicationError{Code: 1})
	tc.wantFrame("CONN_CLOSE in Initial frame is APPLICATION_ERROR",
		packetTypeInitial, debugFrameConnectionCloseTransport{
			code: errApplicationError,
		})
	tc.wantIdle("no more frames to send")
}
+
// TestConnCloseReceiveInHandshake verifies handling of a CONNECTION_CLOSE
// received while Handshake keys are available: the close response is sent
// in both the Initial and Handshake spaces.
func TestConnCloseReceiveInHandshake(t *testing.T) {
	tc := newTestConn(t, clientSide)
	tc.ignoreFrame(frameTypeAck)
	tc.wantFrame("client sends Initial CRYPTO frame",
		packetTypeInitial, debugFrameCrypto{
			data: tc.cryptoDataOut[tls.QUICEncryptionLevelInitial],
		})
	tc.writeFrames(packetTypeInitial, debugFrameCrypto{
		data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
	})
	tc.writeFrames(packetTypeHandshake, debugFrameConnectionCloseTransport{
		code: errConnectionRefused,
	})
	tc.wantIdle("CONN_CLOSE response not sent until user closes this side")

	wantErr := peerTransportError{code: errConnectionRefused}
	if err := tc.conn.Wait(canceledContext()); !errors.Is(err, wantErr) {
		t.Errorf("conn.Wait() = %v, want %v", err, wantErr)
	}

	// The conn has Initial and Handshake keys, so it will send CONN_CLOSE in both spaces.
	tc.conn.Abort(&ApplicationError{Code: 1})
	tc.wantFrame("CONN_CLOSE in Initial frame is APPLICATION_ERROR",
		packetTypeInitial, debugFrameConnectionCloseTransport{
			code: errApplicationError,
		})
	tc.wantFrame("CONN_CLOSE in Handshake frame is APPLICATION_ERROR",
		packetTypeHandshake, debugFrameConnectionCloseTransport{
			code: errApplicationError,
		})
	tc.wantIdle("no more frames to send")
}
+
// TestConnCloseClosedByEndpoint verifies that closing the endpoint closes
// its connections, sending a NO_ERROR CONNECTION_CLOSE to the peer.
func TestConnCloseClosedByEndpoint(t *testing.T) {
	ctx := canceledContext()
	tc := newTestConn(t, clientSide)
	tc.handshake()

	tc.endpoint.e.Close(ctx)
	tc.wantFrame("endpoint closes connection before exiting",
		packetType1RTT, debugFrameConnectionCloseTransport{
			code: errNo,
		})
}
+
// testConnCloseUnblocks is a helper verifying that a blocking operation f
// (e.g. AcceptStream, Read, Write) returns with an error once the conn is
// aborted. opts are passed through to newTestConn.
func testConnCloseUnblocks(t *testing.T, f func(context.Context, *testConn) error, opts ...any) {
	tc := newTestConn(t, clientSide, opts...)
	tc.handshake()
	op := runAsync(tc, func(ctx context.Context) (struct{}, error) {
		return struct{}{}, f(ctx, tc)
	})
	// The operation must still be blocked before the abort...
	if _, err := op.result(); err != errNotDone {
		t.Fatalf("before abort, op = %v, want errNotDone", err)
	}
	tc.conn.Abort(nil)
	// ...and must fail (not hang, not succeed) after it.
	if _, err := op.result(); err == nil || err == errNotDone {
		t.Fatalf("after abort, op = %v, want error", err)
	}
}
+
// TestConnCloseUnblocksAcceptStream verifies a blocked AcceptStream fails on close.
func TestConnCloseUnblocksAcceptStream(t *testing.T) {
	testConnCloseUnblocks(t, func(ctx context.Context, tc *testConn) error {
		_, err := tc.conn.AcceptStream(ctx)
		return err
	}, permissiveTransportParameters)
}
+
// TestConnCloseUnblocksNewStream verifies a blocked NewStream fails on close.
func TestConnCloseUnblocksNewStream(t *testing.T) {
	testConnCloseUnblocks(t, func(ctx context.Context, tc *testConn) error {
		_, err := tc.conn.NewStream(ctx)
		return err
	})
}
+
// TestConnCloseUnblocksStreamRead verifies a blocked Stream.Read fails on close.
func TestConnCloseUnblocksStreamRead(t *testing.T) {
	testConnCloseUnblocks(t, func(ctx context.Context, tc *testConn) error {
		s := newLocalStream(t, tc, bidiStream)
		s.SetReadContext(ctx)
		buf := make([]byte, 16)
		_, err := s.Read(buf)
		return err
	}, permissiveTransportParameters)
}
+
// TestConnCloseUnblocksStreamWrite verifies a Stream.Write blocked on a full
// write buffer fails on close. The write buffer is set smaller than the
// write so the operation must block.
func TestConnCloseUnblocksStreamWrite(t *testing.T) {
	testConnCloseUnblocks(t, func(ctx context.Context, tc *testConn) error {
		s := newLocalStream(t, tc, bidiStream)
		s.SetWriteContext(ctx)
		buf := make([]byte, 32)
		_, err := s.Write(buf)
		return err
	}, permissiveTransportParameters, func(c *Config) {
		c.MaxStreamWriteBufferSize = 16
	})
}
+
// TestConnCloseUnblocksStreamClose verifies a blocked Stream.Close fails on close.
func TestConnCloseUnblocksStreamClose(t *testing.T) {
	testConnCloseUnblocks(t, func(ctx context.Context, tc *testConn) error {
		s := newLocalStream(t, tc, bidiStream)
		s.SetWriteContext(ctx)
		buf := make([]byte, 16)
		_, err := s.Write(buf)
		if err != nil {
			return err
		}
		// Close must flush buffered data, which blocks until acked or aborted.
		return s.Close()
	}, permissiveTransportParameters)
}
diff --git a/quic/conn_flow.go b/quic/conn_flow.go
new file mode 100644
index 000000000..8b69ef7db
--- /dev/null
+++ b/quic/conn_flow.go
@@ -0,0 +1,144 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "sync/atomic"
+ "time"
+)
+
// connInflow tracks connection-level flow control for data sent by the peer to us.
//
// There are four byte offsets of significance in the stream of data received from the peer,
// each >= to the previous:
//
//   - bytes read by the user
//   - bytes received from the peer
//   - limit sent to the peer in a MAX_DATA frame
//   - potential new limit to send to the peer
//
// We maintain a flow control window, so as bytes are read by the user
// the potential limit is extended correspondingly.
//
// We keep an atomic counter of bytes read by the user and not yet applied to the
// potential limit (credit). When this count grows large enough, we update the
// new limit to send and mark that we need to send a new MAX_DATA frame.
type connInflow struct {
	sent      sentVal // set when we need to send a MAX_DATA update to the peer
	usedLimit int64   // total bytes sent by the peer; must not exceed sentLimit
	sentLimit int64   // last MAX_DATA sent to the peer
	newLimit  int64   // new MAX_DATA to send

	credit atomic.Int64 // bytes read but not yet applied to extending the flow-control window
}
+
// inflowInit initializes connection-level inbound flow control state.
func (c *Conn) inflowInit() {
	// The initial MAX_DATA limit is sent as a transport parameter.
	c.streams.inflow.sentLimit = c.config.maxConnReadBufferSize()
	c.streams.inflow.newLimit = c.streams.inflow.sentLimit
}
+
// handleStreamBytesReadOffLoop records that the user has consumed bytes from a stream.
// We may extend the peer's flow control window.
//
// This is called indirectly by the user, via Read or CloseRead.
func (c *Conn) handleStreamBytesReadOffLoop(n int64) {
	if n == 0 {
		return
	}
	// credit is atomic, so it can be updated off the conn's loop.
	if c.shouldUpdateFlowControl(c.streams.inflow.credit.Add(n)) {
		// We should send a MAX_DATA update to the peer.
		// Record this on the Conn's main loop.
		c.sendMsg(func(now time.Time, c *Conn) {
			// A MAX_DATA update may have already happened, so check again.
			if c.shouldUpdateFlowControl(c.streams.inflow.credit.Load()) {
				c.sendMaxDataUpdate()
			}
		})
	}
}
+
// handleStreamBytesReadOnLoop extends the peer's flow control window after
// data has been discarded due to a RESET_STREAM frame.
//
// This is called on the conn's loop, so no message needs to be scheduled.
func (c *Conn) handleStreamBytesReadOnLoop(n int64) {
	if c.shouldUpdateFlowControl(c.streams.inflow.credit.Add(n)) {
		c.sendMaxDataUpdate()
	}
}
+
// sendMaxDataUpdate marks that a MAX_DATA frame needs to be sent,
// folding accumulated read credit into the limit we will advertise.
func (c *Conn) sendMaxDataUpdate() {
	c.streams.inflow.sent.setUnsent()
	// Apply current credit to the limit.
	// We don't strictly need to do this here
	// since appendMaxDataFrame will do so as well,
	// but this avoids redundant trips down this path
	// if the MAX_DATA frame doesn't go out right away.
	c.streams.inflow.newLimit += c.streams.inflow.credit.Swap(0)
}
+
// shouldUpdateFlowControl reports whether the given amount of unapplied
// read credit is large enough to justify sending a MAX_DATA update.
func (c *Conn) shouldUpdateFlowControl(credit int64) bool {
	return shouldUpdateFlowControl(c.config.maxConnReadBufferSize(), credit)
}
+
// handleStreamBytesReceived records that the peer has sent us stream data.
// It returns a FLOW_CONTROL_ERROR transport error if the peer exceeds the
// limit we last advertised in MAX_DATA.
func (c *Conn) handleStreamBytesReceived(n int64) error {
	c.streams.inflow.usedLimit += n
	if c.streams.inflow.usedLimit > c.streams.inflow.sentLimit {
		return localTransportError{
			code:   errFlowControl,
			reason: "stream exceeded flow control limit",
		}
	}
	return nil
}
+
+// appendMaxDataFrame appends a MAX_DATA frame to the current packet.
+//
+// It returns true if no more frames need appending,
+// false if it could not fit a frame in the current packet.
+func (c *Conn) appendMaxDataFrame(w *packetWriter, pnum packetNumber, pto bool) bool {
+ if c.streams.inflow.sent.shouldSendPTO(pto) {
+ // Add any unapplied credit to the new limit now.
+ c.streams.inflow.newLimit += c.streams.inflow.credit.Swap(0)
+ if !w.appendMaxDataFrame(c.streams.inflow.newLimit) {
+ return false
+ }
+ c.streams.inflow.sentLimit += c.streams.inflow.newLimit
+ c.streams.inflow.sent.setSent(pnum)
+ }
+ return true
+}
+
// ackOrLossMaxData records the fate of a MAX_DATA frame:
// a lost frame is marked for retransmission, an acked one is settled.
func (c *Conn) ackOrLossMaxData(pnum packetNumber, fate packetFate) {
	c.streams.inflow.sent.ackLatestOrLoss(pnum, fate)
}
+
// connOutflow tracks connection-level flow control for data sent by us to the peer.
type connOutflow struct {
	max  int64 // largest MAX_DATA received from peer
	used int64 // total bytes of STREAM data sent to peer
}
+
+// setMaxData updates the connection-level flow control limit
+// with the initial limit conveyed in transport parameters
+// or an update from a MAX_DATA frame.
+func (f *connOutflow) setMaxData(maxData int64) {
+ f.max = max(f.max, maxData)
+}
+
// avail returns the number of connection-level flow control bytes available,
// i.e. how much more STREAM data we may send before exceeding the peer's limit.
func (f *connOutflow) avail() int64 {
	return f.max - f.used
}
+
// consume records consumption of n bytes of flow.
// The caller must have checked avail first; consume does not validate.
func (f *connOutflow) consume(n int64) {
	f.used += n
}
diff --git a/quic/conn_flow_test.go b/quic/conn_flow_test.go
new file mode 100644
index 000000000..260684bdb
--- /dev/null
+++ b/quic/conn_flow_test.go
@@ -0,0 +1,436 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "testing"
+)
+
+// TestConnInflowReturnOnRead verifies that reading stream data returns
+// connection-level flow control credit to the peer via MAX_DATA frames.
+func TestConnInflowReturnOnRead(t *testing.T) {
+ tc, s := newTestConnAndRemoteStream(t, serverSide, uniStream, func(c *Config) {
+ c.MaxConnReadBufferSize = 64
+ })
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: make([]byte, 8),
+ })
+ if n, err := s.Read(make([]byte, 8)); n != 8 || err != nil {
+ t.Fatalf("s.Read() = %v, %v; want %v, nil", n, err, 8)
+ }
+ tc.wantFrame("available window increases, send a MAX_DATA",
+ packetType1RTT, debugFrameMaxData{
+ max: 64 + 8,
+ })
+ // Peer can write up to the new limit.
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 8,
+ data: make([]byte, 64),
+ })
+ if n, err := s.Read(make([]byte, 64+1)); n != 64 {
+ t.Fatalf("s.Read() = %v, %v; want %v, anything", n, err, 64)
+ }
+ tc.wantFrame("available window increases, send a MAX_DATA",
+ packetType1RTT, debugFrameMaxData{
+ max: 64 + 8 + 64,
+ })
+ tc.wantIdle("connection is idle")
+}
+
+// TestConnInflowReturnOnRacingReads verifies that two concurrent reads
+// produce a single (coalesced) MAX_DATA update rather than one each.
+func TestConnInflowReturnOnRacingReads(t *testing.T) {
+ // Perform two reads at the same time,
+ // one for half of MaxConnReadBufferSize
+ // and one for one byte.
+ //
+ // We should observe a single MAX_DATA update.
+ // Depending on the ordering of events,
+ // this may include the credit from just the larger read
+ // or the credit from both.
+ ctx := canceledContext()
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxConnReadBufferSize = 64
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, uniStream, 0),
+ data: make([]byte, 16),
+ })
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, uniStream, 1),
+ data: make([]byte, 1),
+ })
+ s1, err := tc.conn.AcceptStream(ctx)
+ if err != nil {
+ t.Fatalf("conn.AcceptStream() = %v", err)
+ }
+ s2, err := tc.conn.AcceptStream(ctx)
+ if err != nil {
+ t.Fatalf("conn.AcceptStream() = %v", err)
+ }
+ read1 := runAsync(tc, func(ctx context.Context) (int, error) {
+ return s1.Read(make([]byte, 16))
+ })
+ read2 := runAsync(tc, func(ctx context.Context) (int, error) {
+ return s2.Read(make([]byte, 1))
+ })
+ // This MAX_DATA might extend the window by 16 or 17, depending on
+ // whether the second write occurs before the update happens.
+ tc.wantFrameType("MAX_DATA update is sent",
+ packetType1RTT, debugFrameMaxData{})
+ tc.wantIdle("redundant MAX_DATA is not sent")
+ if _, err := read1.result(); err != nil {
+ t.Errorf("Read #1 = %v", err)
+ }
+ if _, err := read2.result(); err != nil {
+ t.Errorf("Read #2 = %v", err)
+ }
+}
+
+// TestConnInflowReturnOnClose verifies that closing a stream's read side
+// returns all of its unread flow control credit to the connection.
+func TestConnInflowReturnOnClose(t *testing.T) {
+ tc, s := newTestConnAndRemoteStream(t, serverSide, uniStream, func(c *Config) {
+ c.MaxConnReadBufferSize = 64
+ })
+ tc.ignoreFrame(frameTypeStopSending)
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: make([]byte, 64),
+ })
+ s.CloseRead()
+ // The 64 unread bytes are discarded and their credit returned: 64+64.
+ tc.wantFrame("closing stream updates connection-level flow control",
+ packetType1RTT, debugFrameMaxData{
+ max: 128,
+ })
+}
+
+// TestConnInflowReturnOnReset verifies that a RESET_STREAM returns the
+// stream's flow control credit (up to its final size) to the connection.
+func TestConnInflowReturnOnReset(t *testing.T) {
+ tc, s := newTestConnAndRemoteStream(t, serverSide, uniStream, func(c *Config) {
+ c.MaxConnReadBufferSize = 64
+ })
+ tc.ignoreFrame(frameTypeStopSending)
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: make([]byte, 32),
+ })
+ tc.writeFrames(packetType1RTT, debugFrameResetStream{
+ id: s.id,
+ finalSize: 64,
+ })
+ s.CloseRead()
+ // The reset stream's final size (64) is fully credited back: 64+64.
+ // (Fixed typo in the description: "reseet" -> "reset".)
+ tc.wantFrame("receiving stream reset updates connection-level flow control",
+ packetType1RTT, debugFrameMaxData{
+ max: 128,
+ })
+}
+
+// TestConnInflowStreamViolation verifies that a peer exceeding the
+// connection-level flow control limit triggers a FLOW_CONTROL_ERROR.
+func TestConnInflowStreamViolation(t *testing.T) {
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxConnReadBufferSize = 100
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ // Total MAX_DATA consumed: 50
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 0),
+ data: make([]byte, 50),
+ })
+ // Total MAX_DATA consumed: 80
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, uniStream, 0),
+ off: 20,
+ data: make([]byte, 10),
+ })
+ // Total MAX_DATA consumed: 100
+ // (A FIN at offset 70 extends this stream's highest offset from 50 to 70.)
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 0),
+ off: 70,
+ fin: true,
+ })
+ // This stream has already consumed quota for these bytes.
+ // Total MAX_DATA consumed: 100
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, uniStream, 0),
+ data: make([]byte, 20),
+ })
+ tc.wantIdle("peer has consumed all MAX_DATA quota")
+
+ // Total MAX_DATA consumed: 101
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 2),
+ data: make([]byte, 1),
+ })
+ tc.wantFrame("peer violates MAX_DATA limit",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errFlowControl,
+ })
+}
+
+// TestConnInflowResetViolation verifies that a RESET_STREAM whose final
+// size exceeds the remaining connection-level quota is a FLOW_CONTROL_ERROR.
+func TestConnInflowResetViolation(t *testing.T) {
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxConnReadBufferSize = 100
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 0),
+ data: make([]byte, 100),
+ })
+ tc.wantIdle("peer has consumed all MAX_DATA quota")
+
+ tc.writeFrames(packetType1RTT, debugFrameResetStream{
+ id: newStreamID(clientSide, uniStream, 0),
+ finalSize: 0,
+ })
+ tc.wantIdle("stream reset does not consume MAX_DATA quota, no error")
+
+ // finalSize: 1 claims one byte beyond the exhausted quota.
+ tc.writeFrames(packetType1RTT, debugFrameResetStream{
+ id: newStreamID(clientSide, uniStream, 1),
+ finalSize: 1,
+ })
+ tc.wantFrame("RESET_STREAM final size violates MAX_DATA limit",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errFlowControl,
+ })
+}
+
+// TestConnInflowMultipleStreams verifies that flow control credit returned
+// by several streams is aggregated before a MAX_DATA update is sent.
+func TestConnInflowMultipleStreams(t *testing.T) {
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxConnReadBufferSize = 128
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ var streams []*Stream
+ for _, id := range []streamID{
+ newStreamID(clientSide, uniStream, 0),
+ newStreamID(clientSide, uniStream, 1),
+ newStreamID(clientSide, bidiStream, 0),
+ newStreamID(clientSide, bidiStream, 1),
+ } {
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: id,
+ data: make([]byte, 1),
+ })
+ s := tc.acceptStream()
+ streams = append(streams, s)
+ if n, err := s.Read(make([]byte, 1)); err != nil || n != 1 {
+ t.Fatalf("s.Read() = %v, %v; want 1, nil", n, err)
+ }
+ }
+ tc.wantIdle("streams have read data, but not enough to update MAX_DATA")
+
+ for _, s := range streams {
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 1,
+ data: make([]byte, 31),
+ })
+ }
+
+ // Reading 31 more bytes pushes the pending credit over the update
+ // threshold; the MAX_DATA reflects all credit returned so far.
+ if n, err := streams[0].Read(make([]byte, 32)); n != 31 {
+ t.Fatalf("s.Read() = %v, %v; want 31, anything", n, err)
+ }
+ tc.wantFrame("read enough data to trigger a MAX_DATA update",
+ packetType1RTT, debugFrameMaxData{
+ max: 128 + 32 + 1 + 1 + 1,
+ })
+
+ tc.ignoreFrame(frameTypeStopSending)
+ streams[2].CloseRead()
+ tc.wantFrame("closed stream triggers another MAX_DATA update",
+ packetType1RTT, debugFrameMaxData{
+ max: 128 + 32 + 1 + 32 + 1,
+ })
+}
+
+// TestConnOutflowBlocked verifies that stream writes are paced by the
+// peer's MAX_DATA limit and resume as the limit is raised.
+func TestConnOutflowBlocked(t *testing.T) {
+ tc, s := newTestConnAndLocalStream(t, clientSide, uniStream,
+ permissiveTransportParameters,
+ func(p *transportParameters) {
+ p.initialMaxData = 10
+ })
+ tc.ignoreFrame(frameTypeAck)
+
+ data := makeTestData(32)
+ n, err := s.Write(data)
+ if n != len(data) || err != nil {
+ t.Fatalf("s.Write() = %v, %v; want %v, nil", n, err, len(data))
+ }
+ s.Flush()
+
+ tc.wantFrame("stream writes data up to MAX_DATA limit",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: data[:10],
+ })
+ tc.wantIdle("stream is blocked by MAX_DATA limit")
+
+ tc.writeFrames(packetType1RTT, debugFrameMaxData{
+ max: 20,
+ })
+ tc.wantFrame("stream writes data up to new MAX_DATA limit",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 10,
+ data: data[10:20],
+ })
+ tc.wantIdle("stream is blocked by new MAX_DATA limit")
+
+ tc.writeFrames(packetType1RTT, debugFrameMaxData{
+ max: 100,
+ })
+ tc.wantFrame("stream writes remaining data",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 20,
+ data: data[20:],
+ })
+}
+
+// TestConnOutflowMaxDataDecreases verifies that a MAX_DATA frame carrying
+// a smaller limit than previously received is ignored (limits are monotonic).
+func TestConnOutflowMaxDataDecreases(t *testing.T) {
+ tc, s := newTestConnAndLocalStream(t, clientSide, uniStream,
+ permissiveTransportParameters,
+ func(p *transportParameters) {
+ p.initialMaxData = 10
+ })
+ tc.ignoreFrame(frameTypeAck)
+
+ // Decrease in MAX_DATA is ignored.
+ tc.writeFrames(packetType1RTT, debugFrameMaxData{
+ max: 5,
+ })
+
+ data := makeTestData(32)
+ n, err := s.Write(data)
+ if n != len(data) || err != nil {
+ t.Fatalf("s.Write() = %v, %v; want %v, nil", n, err, len(data))
+ }
+ s.Flush()
+
+ tc.wantFrame("stream writes data up to MAX_DATA limit",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: data[:10],
+ })
+}
+
+// TestConnOutflowMaxDataRoundRobin verifies that scarce connection-level
+// flow control credit is distributed across blocked streams in round-robin
+// order rather than starving one of them.
+func TestConnOutflowMaxDataRoundRobin(t *testing.T) {
+ ctx := canceledContext()
+ tc := newTestConn(t, clientSide, permissiveTransportParameters,
+ func(p *transportParameters) {
+ p.initialMaxData = 0
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ s1, err := tc.conn.newLocalStream(ctx, uniStream)
+ if err != nil {
+ t.Fatalf("conn.newLocalStream(%v) = %v", uniStream, err)
+ }
+ s2, err := tc.conn.newLocalStream(ctx, uniStream)
+ if err != nil {
+ t.Fatalf("conn.newLocalStream(%v) = %v", uniStream, err)
+ }
+
+ s1.Write(make([]byte, 10))
+ s1.Flush()
+ s2.Write(make([]byte, 10))
+ s2.Flush()
+
+ // Each MAX_DATA frame releases one byte of credit; the streams
+ // should take turns consuming it.
+ tc.writeFrames(packetType1RTT, debugFrameMaxData{
+ max: 1,
+ })
+ tc.wantFrame("stream 1 writes data up to MAX_DATA limit",
+ packetType1RTT, debugFrameStream{
+ id: s1.id,
+ data: []byte{0},
+ })
+
+ tc.writeFrames(packetType1RTT, debugFrameMaxData{
+ max: 2,
+ })
+ tc.wantFrame("stream 2 writes data up to MAX_DATA limit",
+ packetType1RTT, debugFrameStream{
+ id: s2.id,
+ data: []byte{0},
+ })
+
+ tc.writeFrames(packetType1RTT, debugFrameMaxData{
+ max: 3,
+ })
+ tc.wantFrame("stream 1 writes data up to MAX_DATA limit",
+ packetType1RTT, debugFrameStream{
+ id: s1.id,
+ off: 1,
+ data: []byte{0},
+ })
+}
+
+// TestConnOutflowMetaAndData verifies that non-data frames (STOP_SENDING)
+// are not subject to connection-level flow control and can be sent while
+// STREAM data is blocked.
+func TestConnOutflowMetaAndData(t *testing.T) {
+ tc, s := newTestConnAndLocalStream(t, clientSide, bidiStream,
+ permissiveTransportParameters,
+ func(p *transportParameters) {
+ p.initialMaxData = 0
+ })
+ tc.ignoreFrame(frameTypeAck)
+
+ data := makeTestData(32)
+ s.Write(data)
+ s.Flush()
+
+ s.CloseRead()
+ tc.wantFrame("CloseRead sends a STOP_SENDING, not flow controlled",
+ packetType1RTT, debugFrameStopSending{
+ id: s.id,
+ })
+
+ tc.writeFrames(packetType1RTT, debugFrameMaxData{
+ max: 100,
+ })
+ tc.wantFrame("unblocked MAX_DATA",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: data,
+ })
+}
+
+// TestConnOutflowResentData verifies that retransmitted STREAM data does
+// not consume additional connection-level flow control credit.
+func TestConnOutflowResentData(t *testing.T) {
+ tc, s := newTestConnAndLocalStream(t, clientSide, bidiStream,
+ permissiveTransportParameters,
+ func(p *transportParameters) {
+ p.initialMaxData = 10
+ })
+ tc.ignoreFrame(frameTypeAck)
+
+ data := makeTestData(15)
+ s.Write(data[:8])
+ s.Flush()
+ tc.wantFrame("data is under MAX_DATA limit, all sent",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: data[:8],
+ })
+
+ // Lose the last STREAM packet.
+ // (Fixed: the pto constant was declared but the literal false was
+ // passed to triggerLossOrPTO; use the named constant.)
+ const pto = false
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("lost STREAM data is retransmitted",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: data[:8],
+ })
+
+ // Retransmission did not consume credit: 10-8=2 bytes remain.
+ s.Write(data[8:])
+ s.Flush()
+ tc.wantFrame("new data is sent up to the MAX_DATA limit",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 8,
+ data: data[8:10],
+ })
+}
diff --git a/quic/conn_id.go b/quic/conn_id.go
new file mode 100644
index 000000000..2efe8d6b5
--- /dev/null
+++ b/quic/conn_id.go
@@ -0,0 +1,509 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "crypto/rand"
+)
+
+// connIDState is a conn's connection IDs.
+type connIDState struct {
+ // The destination connection IDs of packets we receive are local.
+ // The destination connection IDs of packets we send are remote.
+ //
+ // Local IDs are usually issued by us, and remote IDs by the peer.
+ // The exception is the transient destination connection ID sent in
+ // a client's Initial packets, which is chosen by the client.
+ //
+ // These are []connID rather than []*connID to minimize allocations.
+ local []connID
+ remote []remoteConnID
+
+ nextLocalSeq int64 // sequence number for the next locally-issued connection ID
+ retireRemotePriorTo int64 // largest Retire Prior To value sent by the peer
+ peerActiveConnIDLimit int64 // peer's active_connection_id_limit transport parameter
+
+ originalDstConnID []byte // expected original_destination_connection_id param
+ retrySrcConnID []byte // expected retry_source_connection_id param
+
+ // needSend is set when NEW_CONNECTION_ID or RETIRE_CONNECTION_ID
+ // frames need to be sent; see appendFrames.
+ needSend bool
+}
+
+// A connID is a connection ID and associated metadata.
+type connID struct {
+ // cid is the connection ID itself.
+ cid []byte
+
+ // seq is the connection ID's sequence number:
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-5.1.1-1
+ //
+ // For the transient destination ID in a client's Initial packet, this is -1.
+ seq int64
+
+ // retired is set when the connection ID is retired.
+ retired bool
+
+ // send is set when the connection ID's state needs to be sent to the peer.
+ //
+ // For local IDs, this indicates a new ID that should be sent
+ // in a NEW_CONNECTION_ID frame.
+ //
+ // For remote IDs, this indicates a retired ID that should be sent
+ // in a RETIRE_CONNECTION_ID frame.
+ send sentVal
+}
+
+// A remoteConnID is a connection ID and stateless reset token.
+//
+// The reset token is provided by the peer in NEW_CONNECTION_ID frames or
+// the stateless_reset_token transport parameter.
+type remoteConnID struct {
+ connID
+ resetToken statelessResetToken
+}
+
+// initClient initializes the connection ID state for a new client connection.
+//
+// It registers the chosen local ID with the endpoint's connection map so
+// inbound packets can be routed to this conn.
+func (s *connIDState) initClient(c *Conn) error {
+ // Client chooses its initial connection ID, and sends it
+ // in the Source Connection ID field of the first Initial packet.
+ locid, err := c.newConnID(0)
+ if err != nil {
+ return err
+ }
+ s.local = append(s.local, connID{
+ seq: 0,
+ cid: locid,
+ })
+ s.nextLocalSeq = 1
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ conns.addConnID(c, locid)
+ })
+
+ // Client chooses an initial, transient connection ID for the server,
+ // and sends it in the Destination Connection ID field of the first Initial packet.
+ remid, err := c.newConnID(-1)
+ if err != nil {
+ return err
+ }
+ s.remote = append(s.remote, remoteConnID{
+ connID: connID{
+ seq: -1,
+ cid: remid,
+ },
+ })
+ // Remember the transient ID: the server must echo it back in its
+ // original_destination_connection_id transport parameter.
+ s.originalDstConnID = remid
+ return nil
+}
+
+// initServer initializes the connection ID state for a new server connection,
+// using the connection IDs carried in the client's first Initial packet.
+func (s *connIDState) initServer(c *Conn, cids newServerConnIDs) error {
+ dstConnID := cloneBytes(cids.dstConnID)
+ // Client-chosen, transient connection ID received in the first Initial packet.
+ // The server will not use this as the Source Connection ID of packets it sends,
+ // but remembers it because it may receive packets sent to this destination.
+ s.local = append(s.local, connID{
+ seq: -1,
+ cid: dstConnID,
+ })
+
+ // Server chooses a connection ID, and sends it in the Source Connection ID of
+ // the response to the client.
+ locid, err := c.newConnID(0)
+ if err != nil {
+ return err
+ }
+ s.local = append(s.local, connID{
+ seq: 0,
+ cid: locid,
+ })
+ s.nextLocalSeq = 1
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ conns.addConnID(c, dstConnID)
+ conns.addConnID(c, locid)
+ })
+
+ // Client chose its own connection ID.
+ s.remote = append(s.remote, remoteConnID{
+ connID: connID{
+ seq: 0,
+ cid: cloneBytes(cids.srcConnID),
+ },
+ })
+ return nil
+}
+
+// srcConnID is the Source Connection ID to use in a sent packet.
+//
+// s.local is assumed non-empty here; both initClient and initServer
+// populate at least one entry before packets are sent.
+func (s *connIDState) srcConnID() []byte {
+ if s.local[0].seq == -1 && len(s.local) > 1 {
+ // Don't use the transient connection ID if another is available.
+ return s.local[1].cid
+ }
+ return s.local[0].cid
+}
+
+// dstConnID is the Destination Connection ID to use in a sent packet.
+//
+// It reports the first remote connection ID that has not been retired,
+// or ok=false when every remote ID is retired.
+func (s *connIDState) dstConnID() (cid []byte, ok bool) {
+ for _, rcid := range s.remote {
+ if rcid.retired {
+ continue
+ }
+ return rcid.cid, true
+ }
+ return nil, false
+}
+
+// isValidStatelessResetToken reports whether the given reset token is
+// associated with a non-retired connection ID which we have used.
+//
+// Only the token of the first non-retired remote ID is checked, since
+// that is the only remote ID we send packets with (see dstConnID).
+func (s *connIDState) isValidStatelessResetToken(resetToken statelessResetToken) bool {
+ for i := range s.remote {
+ // We currently only use the first available remote connection ID,
+ // so any other reset token is not valid.
+ if !s.remote[i].retired {
+ return s.remote[i].resetToken == resetToken
+ }
+ }
+ return false
+}
+
+// setPeerActiveConnIDLimit sets the active_connection_id_limit
+// transport parameter received from the peer, and issues any
+// additional local connection IDs the new limit permits.
+func (s *connIDState) setPeerActiveConnIDLimit(c *Conn, lim int64) error {
+ s.peerActiveConnIDLimit = lim
+ return s.issueLocalIDs(c)
+}
+
+// issueLocalIDs creates new local connection IDs until the number of
+// active (non-transient, non-retired) IDs reaches the peer's
+// active_connection_id_limit, capped at maxPeerActiveConnIDLimit.
+//
+// New IDs are marked for transmission in NEW_CONNECTION_ID frames and
+// registered with the endpoint's connection map.
+func (s *connIDState) issueLocalIDs(c *Conn) error {
+ toIssue := min(int(s.peerActiveConnIDLimit), maxPeerActiveConnIDLimit)
+ for i := range s.local {
+ if s.local[i].seq != -1 && !s.local[i].retired {
+ toIssue--
+ }
+ }
+ var newIDs [][]byte
+ for toIssue > 0 {
+ cid, err := c.newConnID(s.nextLocalSeq)
+ if err != nil {
+ return err
+ }
+ newIDs = append(newIDs, cid)
+ s.local = append(s.local, connID{
+ seq: s.nextLocalSeq,
+ cid: cid,
+ })
+ s.local[len(s.local)-1].send.setUnsent()
+ s.nextLocalSeq++
+ s.needSend = true
+ toIssue--
+ }
+ // Register all new IDs in a single connsMap update.
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ for _, cid := range newIDs {
+ conns.addConnID(c, cid)
+ }
+ })
+ return nil
+}
+
+// validateTransportParameters verifies the original_destination_connection_id and
+// initial_source_connection_id transport parameters match the expected values.
+//
+// It also records the peer's stateless_reset_token (clients only), and
+// returns a localTransportError on any mismatch.
+func (s *connIDState) validateTransportParameters(c *Conn, isRetry bool, p transportParameters) error {
+ // TODO: Consider returning more detailed errors, for debugging.
+ // Verify original_destination_connection_id matches
+ // the transient remote connection ID we chose (client)
+ // or is empty (server).
+ if !bytes.Equal(s.originalDstConnID, p.originalDstConnID) {
+ return localTransportError{
+ code: errTransportParameter,
+ reason: "original_destination_connection_id mismatch",
+ }
+ }
+ s.originalDstConnID = nil // we have no further need for this
+ // Verify retry_source_connection_id matches the value from
+ // the server's Retry packet (when one was sent), or is empty.
+ if !bytes.Equal(p.retrySrcConnID, s.retrySrcConnID) {
+ return localTransportError{
+ code: errTransportParameter,
+ reason: "retry_source_connection_id mismatch",
+ }
+ }
+ s.retrySrcConnID = nil // we have no further need for this
+ // Verify initial_source_connection_id matches the first remote connection ID.
+ if len(s.remote) == 0 || s.remote[0].seq != 0 {
+ return localTransportError{
+ code: errInternal,
+ reason: "remote connection id missing",
+ }
+ }
+ if !bytes.Equal(p.initialSrcConnID, s.remote[0].cid) {
+ return localTransportError{
+ code: errTransportParameter,
+ reason: "initial_source_connection_id mismatch",
+ }
+ }
+ // Only servers send stateless_reset_token; receiving one as a server
+ // is a protocol error, receiving one as a client records the token.
+ if len(p.statelessResetToken) > 0 {
+ if c.side == serverSide {
+ return localTransportError{
+ code: errTransportParameter,
+ reason: "client sent stateless_reset_token",
+ }
+ }
+ token := statelessResetToken(p.statelessResetToken)
+ s.remote[0].resetToken = token
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ conns.addResetToken(c, token)
+ })
+ }
+ return nil
+}
+
+// handlePacket updates the connection ID state during the handshake
+// (Initial and Handshake packets).
+//
+// It replaces the client's transient remote ID with the server's real
+// Source Connection ID, and discards the server's transient local ID once
+// the client has proven it no longer needs it.
+func (s *connIDState) handlePacket(c *Conn, ptype packetType, srcConnID []byte) {
+ switch {
+ case ptype == packetTypeInitial && c.side == clientSide:
+ if len(s.remote) == 1 && s.remote[0].seq == -1 {
+ // We're a client connection processing the first Initial packet
+ // from the server. Replace the transient remote connection ID
+ // with the Source Connection ID from the packet.
+ s.remote[0] = remoteConnID{
+ connID: connID{
+ seq: 0,
+ cid: cloneBytes(srcConnID),
+ },
+ }
+ }
+ case ptype == packetTypeHandshake && c.side == serverSide:
+ if len(s.local) > 0 && s.local[0].seq == -1 && !s.local[0].retired {
+ // We're a server connection processing the first Handshake packet from
+ // the client. Discard the transient, client-chosen connection ID used
+ // for Initial packets; the client will never send it again.
+ cid := s.local[0].cid
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ conns.retireConnID(c, cid)
+ })
+ // Shift the remaining IDs down, reusing the backing array.
+ s.local = append(s.local[:0], s.local[1:]...)
+ }
+ }
+}
+
+// handleRetryPacket replaces the client's transient remote connection ID
+// with the Source Connection ID from a server Retry packet, and remembers
+// it for later validation of the retry_source_connection_id parameter.
+//
+// Callers must only invoke this while the sole remote ID is transient.
+func (s *connIDState) handleRetryPacket(srcConnID []byte) {
+ if len(s.remote) != 1 || s.remote[0].seq != -1 {
+ panic("BUG: handling retry with non-transient remote conn id")
+ }
+ s.retrySrcConnID = cloneBytes(srcConnID)
+ s.remote[0].cid = s.retrySrcConnID
+}
+
+// handleNewConnID processes a NEW_CONNECTION_ID frame from the peer:
+// it records the new remote ID, applies the frame's Retire Prior To
+// value, and enforces the active connection ID limits.
+//
+// NOTE(review): s.remote is assumed non-empty here — presumably guaranteed
+// because initClient/initServer always add one remote ID; confirm callers
+// cannot reach this before initialization.
+func (s *connIDState) handleNewConnID(c *Conn, seq, retire int64, cid []byte, resetToken statelessResetToken) error {
+ if len(s.remote[0].cid) == 0 {
+ // "An endpoint that is sending packets with a zero-length
+ // Destination Connection ID MUST treat receipt of a NEW_CONNECTION_ID
+ // frame as a connection error of type PROTOCOL_VIOLATION."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-19.15-6
+ return localTransportError{
+ code: errProtocolViolation,
+ reason: "NEW_CONNECTION_ID from peer with zero-length DCID",
+ }
+ }
+
+ // Retire Prior To is monotonic; ignore stale (smaller) values.
+ if retire > s.retireRemotePriorTo {
+ s.retireRemotePriorTo = retire
+ }
+
+ have := false // do we already have this connection ID?
+ active := 0
+ for i := range s.remote {
+ rcid := &s.remote[i]
+ if !rcid.retired && rcid.seq >= 0 && rcid.seq < s.retireRemotePriorTo {
+ s.retireRemote(rcid)
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ conns.retireResetToken(c, rcid.resetToken)
+ })
+ }
+ if !rcid.retired {
+ active++
+ }
+ if rcid.seq == seq {
+ // A retransmitted NEW_CONNECTION_ID must match the ID we
+ // previously recorded for this sequence number.
+ if !bytes.Equal(rcid.cid, cid) {
+ return localTransportError{
+ code: errProtocolViolation,
+ reason: "NEW_CONNECTION_ID does not match prior id",
+ }
+ }
+ have = true // yes, we've seen this sequence number
+ }
+ }
+
+ if !have {
+ // This is a new connection ID that we have not seen before.
+ //
+ // We could take steps to keep the list of remote connection IDs
+ // sorted by sequence number, but there's no particular need
+ // so we don't bother.
+ s.remote = append(s.remote, remoteConnID{
+ connID: connID{
+ seq: seq,
+ cid: cloneBytes(cid),
+ },
+ resetToken: resetToken,
+ })
+ if seq < s.retireRemotePriorTo {
+ // This ID was already retired by a previous NEW_CONNECTION_ID frame.
+ s.retireRemote(&s.remote[len(s.remote)-1])
+ } else {
+ active++
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ conns.addResetToken(c, resetToken)
+ })
+ }
+ }
+
+ if active > activeConnIDLimit {
+ // Retired connection IDs (including newly-retired ones) do not count
+ // against the limit.
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-5.1.1-5
+ return localTransportError{
+ code: errConnectionIDLimit,
+ reason: "active_connection_id_limit exceeded",
+ }
+ }
+
+ // "An endpoint SHOULD limit the number of connection IDs it has retired locally
+ // for which RETIRE_CONNECTION_ID frames have not yet been acknowledged."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.2-6
+ //
+ // Set a limit of four times the active_connection_id_limit for
+ // the total number of remote connection IDs we keep state for locally.
+ if len(s.remote) > 4*activeConnIDLimit {
+ return localTransportError{
+ code: errConnectionIDLimit,
+ reason: "too many unacknowledged RETIRE_CONNECTION_ID frames",
+ }
+ }
+
+ return nil
+}
+
+// retireRemote marks a remote connection ID as retired, and schedules
+// a RETIRE_CONNECTION_ID frame informing the peer.
+func (s *connIDState) retireRemote(rcid *remoteConnID) {
+ rcid.retired = true
+ rcid.send.setUnsent()
+ s.needSend = true
+}
+
+// handleRetireConnID processes a RETIRE_CONNECTION_ID frame from the peer:
+// it drops the retired local ID and issues a replacement.
+func (s *connIDState) handleRetireConnID(c *Conn, seq int64) error {
+ if seq >= s.nextLocalSeq {
+ // The peer may not retire an ID we never issued.
+ // https://www.rfc-editor.org/rfc/rfc9000#section-19.16-7
+ return localTransportError{
+ code: errProtocolViolation,
+ reason: "RETIRE_CONNECTION_ID for unissued sequence number",
+ }
+ }
+ for i := range s.local {
+ if s.local[i].seq == seq {
+ cid := s.local[i].cid
+ c.endpoint.connsMap.updateConnIDs(func(conns *connsMap) {
+ conns.retireConnID(c, cid)
+ })
+ s.local = append(s.local[:i], s.local[i+1:]...)
+ break
+ }
+ }
+ // Issue a replacement ID so the peer keeps a full set of active IDs.
+ // Propagate the error (previously silently dropped): a failure here
+ // means we could not generate a connection ID at all.
+ return s.issueLocalIDs(c)
+}
+
+// ackOrLossNewConnectionID records the fate of a NEW_CONNECTION_ID frame
+// carrying the local connection ID with the given sequence number.
+func (s *connIDState) ackOrLossNewConnectionID(pnum packetNumber, seq int64, fate packetFate) {
+ for i := range s.local {
+ if s.local[i].seq == seq {
+ s.local[i].send.ackOrLoss(pnum, fate)
+ if fate != packetAcked {
+ // Frame was lost: mark state dirty so it is retransmitted.
+ s.needSend = true
+ }
+ break
+ }
+ }
+}
+
+// ackOrLossRetireConnectionID records the fate of a RETIRE_CONNECTION_ID
+// frame referencing the remote connection ID with the given sequence number.
+func (s *connIDState) ackOrLossRetireConnectionID(pnum packetNumber, seq int64, fate packetFate) {
+ for i := 0; i < len(s.remote); i++ {
+ if s.remote[i].seq != seq {
+ continue
+ }
+ if fate == packetAcked {
+ // We have retired this connection ID, and the peer has acked.
+ // Discard its state completely.
+ s.remote = append(s.remote[:i], s.remote[i+1:]...)
+ } else {
+ // RETIRE_CONNECTION_ID frame was lost, mark for retransmission.
+ s.needSend = true
+ s.remote[i].send.ackOrLoss(pnum, fate)
+ }
+ return
+ }
+}
+
+// appendFrames appends NEW_CONNECTION_ID and RETIRE_CONNECTION_ID frames
+// to the current packet.
+//
+// It returns true if no more frames need appending,
+// false if not everything fit in the current packet.
+func (s *connIDState) appendFrames(c *Conn, pnum packetNumber, pto bool) bool {
+ if !s.needSend && !pto {
+ // Fast path: We don't need to send anything.
+ return true
+ }
+ // Retire Prior To covers every local ID below the smallest one we
+ // still consider live (the transient seq -1 entry never counts).
+ retireBefore := int64(0)
+ if s.local[0].seq != -1 {
+ retireBefore = s.local[0].seq
+ }
+ for i := range s.local {
+ if !s.local[i].send.shouldSendPTO(pto) {
+ continue
+ }
+ if !c.w.appendNewConnectionIDFrame(
+ s.local[i].seq,
+ retireBefore,
+ s.local[i].cid,
+ c.endpoint.resetGen.tokenForConnID(s.local[i].cid),
+ ) {
+ // Out of space; retry in the next packet (needSend stays set).
+ return false
+ }
+ s.local[i].send.setSent(pnum)
+ }
+ for i := range s.remote {
+ if !s.remote[i].send.shouldSendPTO(pto) {
+ continue
+ }
+ if !c.w.appendRetireConnectionIDFrame(s.remote[i].seq) {
+ return false
+ }
+ s.remote[i].send.setSent(pnum)
+ }
+ s.needSend = false
+ return true
+}
+
+// cloneBytes returns a copy of b in freshly-allocated storage.
+//
+// Unlike bytes.Clone, a nil input yields a non-nil empty slice,
+// preserving the original behavior.
+func cloneBytes(b []byte) []byte {
+ return append(make([]byte, 0, len(b)), b...)
+}
+
+// newConnID returns a new connection ID with the given sequence number.
+//
+// Tests may override generation via testHooks; otherwise the ID is random.
+func (c *Conn) newConnID(seq int64) ([]byte, error) {
+ if c.testHooks != nil {
+ return c.testHooks.newConnID(seq)
+ }
+ return newRandomConnID(seq)
+}
+
+// newRandomConnID returns a random connection ID of connIDLen bytes.
+// The sequence number parameter is unused.
+func newRandomConnID(_ int64) ([]byte, error) {
+ // It is not necessary for connection IDs to be cryptographically secure,
+ // but it doesn't hurt.
+ id := make([]byte, connIDLen)
+ if _, err := rand.Read(id); err != nil {
+ // TODO: Surface this error as a metric or log event or something.
+ // rand.Read really shouldn't ever fail, but if it does, we should
+ // have a way to inform the user.
+ return nil, err
+ }
+ return id, nil
+}
diff --git a/quic/conn_id_test.go b/quic/conn_id_test.go
new file mode 100644
index 000000000..d44472e81
--- /dev/null
+++ b/quic/conn_id_test.go
@@ -0,0 +1,666 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "net/netip"
+ "strings"
+ "testing"
+)
+
+func TestConnIDClientHandshake(t *testing.T) {
+ tc := newTestConn(t, clientSide)
+ // On initialization, the client chooses local and remote IDs.
+ //
+ // The order in which we allocate the two isn't actually important,
+ // but the test is a lot simpler if we assume.
+ if got, want := tc.conn.connIDState.srcConnID(), testLocalConnID(0); !bytes.Equal(got, want) {
+ t.Errorf("after initialization: srcConnID = %x, want %x", got, want)
+ }
+ dstConnID, _ := tc.conn.connIDState.dstConnID()
+ if got, want := dstConnID, testLocalConnID(-1); !bytes.Equal(got, want) {
+ t.Errorf("after initialization: dstConnID = %x, want %x", got, want)
+ }
+
+ // The server's first Initial packet provides the client with a
+ // non-transient remote connection ID.
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ dstConnID, _ = tc.conn.connIDState.dstConnID()
+ if got, want := dstConnID, testPeerConnID(0); !bytes.Equal(got, want) {
+ t.Errorf("after receiving Initial: dstConnID = %x, want %x", got, want)
+ }
+
+ wantLocal := []connID{{
+ cid: testLocalConnID(0),
+ seq: 0,
+ }}
+ if got := tc.conn.connIDState.local; !connIDListEqual(got, wantLocal) {
+ t.Errorf("local ids: %v, want %v", fmtConnIDList(got), fmtConnIDList(wantLocal))
+ }
+ wantRemote := []remoteConnID{{
+ connID: connID{
+ cid: testPeerConnID(0),
+ seq: 0,
+ },
+ }}
+ if got := tc.conn.connIDState.remote; !remoteConnIDListEqual(got, wantRemote) {
+ t.Errorf("remote ids: %v, want %v", fmtRemoteConnIDList(got), fmtRemoteConnIDList(wantRemote))
+ }
+}
+
+func TestConnIDServerHandshake(t *testing.T) {
+ tc := newTestConn(t, serverSide)
+ // On initialization, the server is provided with the client-chosen
+ // transient connection ID, and allocates an ID of its own.
+ // The Initial packet sets the remote connection ID.
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial][:1],
+ })
+ if got, want := tc.conn.connIDState.srcConnID(), testLocalConnID(0); !bytes.Equal(got, want) {
+ t.Errorf("after initClient: srcConnID = %q, want %q", got, want)
+ }
+ dstConnID, _ := tc.conn.connIDState.dstConnID()
+ if got, want := dstConnID, testPeerConnID(0); !bytes.Equal(got, want) {
+ t.Errorf("after initClient: dstConnID = %q, want %q", got, want)
+ }
+
+ // The Initial flight of CRYPTO data includes transport parameters,
+ // which cause us to allocate another local connection ID.
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ off: 1,
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial][1:],
+ })
+ wantLocal := []connID{{
+ cid: testPeerConnID(-1),
+ seq: -1,
+ }, {
+ cid: testLocalConnID(0),
+ seq: 0,
+ }, {
+ cid: testLocalConnID(1),
+ seq: 1,
+ }}
+ if got := tc.conn.connIDState.local; !connIDListEqual(got, wantLocal) {
+ t.Errorf("local ids: %v, want %v", fmtConnIDList(got), fmtConnIDList(wantLocal))
+ }
+ wantRemote := []remoteConnID{{
+ connID: connID{
+ cid: testPeerConnID(0),
+ seq: 0,
+ },
+ }}
+ if got := tc.conn.connIDState.remote; !remoteConnIDListEqual(got, wantRemote) {
+ t.Errorf("remote ids: %v, want %v", fmtRemoteConnIDList(got), fmtRemoteConnIDList(wantRemote))
+ }
+
+ // The client's first Handshake packet permits the server to discard the
+ // transient connection ID.
+ tc.writeFrames(packetTypeHandshake,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake],
+ })
+ wantLocal = []connID{{
+ cid: testLocalConnID(0),
+ seq: 0,
+ }, {
+ cid: testLocalConnID(1),
+ seq: 1,
+ }}
+ if got := tc.conn.connIDState.local; !connIDListEqual(got, wantLocal) {
+ t.Errorf("local ids: %v, want %v", fmtConnIDList(got), fmtConnIDList(wantLocal))
+ }
+}
+
+func connIDListEqual(a, b []connID) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i].seq != b[i].seq {
+ return false
+ }
+ if !bytes.Equal(a[i].cid, b[i].cid) {
+ return false
+ }
+ }
+ return true
+}
+
+func remoteConnIDListEqual(a, b []remoteConnID) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i].seq != b[i].seq {
+ return false
+ }
+ if !bytes.Equal(a[i].cid, b[i].cid) {
+ return false
+ }
+ if a[i].resetToken != b[i].resetToken {
+ return false
+ }
+ }
+ return true
+}
+
+func fmtConnIDList(s []connID) string {
+ var strs []string
+ for _, cid := range s {
+ strs = append(strs, fmt.Sprintf("[seq:%v cid:{%x}]", cid.seq, cid.cid))
+ }
+ return "{" + strings.Join(strs, " ") + "}"
+}
+
+func fmtRemoteConnIDList(s []remoteConnID) string {
+ var strs []string
+ for _, cid := range s {
+ strs = append(strs, fmt.Sprintf("[seq:%v cid:{%x} token:{%x}]", cid.seq, cid.cid, cid.resetToken))
+ }
+ return "{" + strings.Join(strs, " ") + "}"
+}
+
+func TestNewRandomConnID(t *testing.T) {
+ cid, err := newRandomConnID(0)
+ if len(cid) != connIDLen || err != nil {
+ t.Fatalf("newConnID() = %x, %v; want %v bytes", cid, err, connIDLen)
+ }
+}
+
+func TestConnIDPeerRequestsManyIDs(t *testing.T) {
+ // "An endpoint SHOULD ensure that its peer has a sufficient number
+ // of available and unused connection IDs."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.1-4
+ //
+ // "An endpoint MAY limit the total number of connection IDs
+ // issued for each connection [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.1-6
+ //
+ // Peer requests 100 connection IDs.
+ // We give them 4 in total.
+ tc := newTestConn(t, serverSide, func(p *transportParameters) {
+ p.activeConnIDLimit = 100
+ })
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeCrypto)
+
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ tc.wantFrame("provide additional connection ID 1",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 1,
+ connID: testLocalConnID(1),
+ token: testLocalStatelessResetToken(1),
+ })
+ tc.wantFrame("provide additional connection ID 2",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 2,
+ connID: testLocalConnID(2),
+ token: testLocalStatelessResetToken(2),
+ })
+ tc.wantFrame("provide additional connection ID 3",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 3,
+ connID: testLocalConnID(3),
+ token: testLocalStatelessResetToken(3),
+ })
+ tc.wantIdle("connection ID limit reached, no more to provide")
+}
+
+func TestConnIDPeerProvidesTooManyIDs(t *testing.T) {
+ // "An endpoint MUST NOT provide more connection IDs than the peer's limit."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.1-4
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ connID: testLocalConnID(2),
+ })
+ tc.wantFrame("peer provided 3 connection IDs, our limit is 2",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errConnectionIDLimit,
+ })
+}
+
+func TestConnIDPeerTemporarilyExceedsActiveConnIDLimit(t *testing.T) {
+ // "An endpoint MAY send connection IDs that temporarily exceed a peer's limit
+ // if the NEW_CONNECTION_ID frame also requires the retirement of any excess [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.1-4
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ retirePriorTo: 2,
+ seq: 2,
+ connID: testPeerConnID(2),
+ }, debugFrameNewConnectionID{
+ retirePriorTo: 2,
+ seq: 3,
+ connID: testPeerConnID(3),
+ })
+ tc.wantFrame("peer requested we retire conn id 0",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ tc.wantFrame("peer requested we retire conn id 1",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 1,
+ })
+}
+
+func TestConnIDPeerRetiresConnID(t *testing.T) {
+ // "An endpoint SHOULD supply a new connection ID when the peer retires a connection ID."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.1-6
+ for _, side := range []connSide{
+ clientSide,
+ serverSide,
+ } {
+ t.Run(side.String(), func(t *testing.T) {
+ tc := newTestConn(t, side)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ tc.wantFrame("provide replacement connection ID",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: testLocalConnID(2),
+ token: testLocalStatelessResetToken(2),
+ })
+ })
+ }
+}
+
+func TestConnIDPeerWithZeroLengthConnIDSendsNewConnectionID(t *testing.T) {
+ // "An endpoint that selects a zero-length connection ID during the handshake
+ // cannot issue a new connection ID."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.1-8
+ tc := newTestConn(t, clientSide, func(p *transportParameters) {
+ p.initialSrcConnID = []byte{}
+ })
+ tc.peerConnID = []byte{}
+ tc.ignoreFrame(frameTypeAck)
+ tc.uncheckedHandshake()
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 1,
+ connID: testPeerConnID(1),
+ })
+ tc.wantFrame("invalid NEW_CONNECTION_ID: previous conn id is zero-length",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errProtocolViolation,
+ })
+}
+
+func TestConnIDPeerRequestsRetirement(t *testing.T) {
+ // "Upon receipt of an increased Retire Prior To field, the peer MUST
+ // stop using the corresponding connection IDs and retire them with
+ // RETIRE_CONNECTION_ID frames [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.2-5
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: testPeerConnID(2),
+ })
+ tc.wantFrame("peer asked for conn id 0 to be retired",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ if got, want := tc.lastPacket.dstConnID, testPeerConnID(1); !bytes.Equal(got, want) {
+ t.Fatalf("used destination conn id {%x}, want {%x}", got, want)
+ }
+}
+
+func TestConnIDPeerDoesNotAcknowledgeRetirement(t *testing.T) {
+ // "An endpoint SHOULD limit the number of connection IDs it has retired locally
+ // for which RETIRE_CONNECTION_ID frames have not yet been acknowledged."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-5.1.2-6
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeRetireConnectionID)
+
+ // Send a number of NEW_CONNECTION_ID frames, each retiring an old one.
+ for seq := int64(0); seq < 7; seq++ {
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: seq + 2,
+ retirePriorTo: seq + 1,
+ connID: testPeerConnID(seq + 2),
+ })
+ // We're ignoring the RETIRE_CONNECTION_ID frames.
+ }
+ tc.wantFrame("number of retired, unacked conn ids is too large",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errConnectionIDLimit,
+ })
+}
+
+func TestConnIDRepeatedNewConnectionIDFrame(t *testing.T) {
+ // "Receipt of the same [NEW_CONNECTION_ID] frame multiple times
+ // MUST NOT be treated as a connection error."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-19.15-7
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ for i := 0; i < 4; i++ {
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: testPeerConnID(2),
+ })
+ }
+ tc.wantFrame("peer asked for conn id to be retired",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ tc.wantIdle("repeated NEW_CONNECTION_ID frames are not an error")
+}
+
+func TestConnIDForSequenceNumberChanges(t *testing.T) {
+ // "[...] if a sequence number is used for different connection IDs,
+ // the endpoint MAY treat that receipt as a connection error
+ // of type PROTOCOL_VIOLATION."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-19.15-8
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeRetireConnectionID)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: testPeerConnID(2),
+ })
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: testPeerConnID(3),
+ })
+ tc.wantFrame("connection ID for sequence 0 has changed",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errProtocolViolation,
+ })
+}
+
+func TestConnIDRetirePriorToAfterNewConnID(t *testing.T) {
+ // "Receiving a value in the Retire Prior To field that is greater than
+ // that in the Sequence Number field MUST be treated as a connection error
+ // of type FRAME_ENCODING_ERROR."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-19.15-9
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ retirePriorTo: 3,
+ seq: 2,
+ connID: testPeerConnID(2),
+ })
+ tc.wantFrame("invalid NEW_CONNECTION_ID: retired the new conn id",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errFrameEncoding,
+ })
+}
+
+func TestConnIDAlreadyRetired(t *testing.T) {
+ // "An endpoint that receives a NEW_CONNECTION_ID frame with a
+ // sequence number smaller than the Retire Prior To field of a
+ // previously received NEW_CONNECTION_ID frame MUST send a
+ // corresponding RETIRE_CONNECTION_ID frame [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-19.15-11
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 4,
+ retirePriorTo: 3,
+ connID: testPeerConnID(4),
+ })
+ tc.wantFrame("peer asked for conn id to be retired",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ tc.wantFrame("peer asked for conn id to be retired",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 1,
+ })
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 0,
+ connID: testPeerConnID(2),
+ })
+ tc.wantFrame("NEW_CONNECTION_ID was for an already-retired ID",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 2,
+ })
+}
+
+func TestConnIDRepeatedRetireConnectionIDFrame(t *testing.T) {
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ for i := 0; i < 4; i++ {
+ tc.writeFrames(packetType1RTT,
+ debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ }
+ tc.wantFrame("issue new conn id after peer retires one",
+ packetType1RTT, debugFrameNewConnectionID{
+ retirePriorTo: 1,
+ seq: 2,
+ connID: testLocalConnID(2),
+ token: testLocalStatelessResetToken(2),
+ })
+ tc.wantIdle("repeated RETIRE_CONNECTION_ID frames are not an error")
+}
+
+func TestConnIDRetiredUnsent(t *testing.T) {
+ // "Receipt of a RETIRE_CONNECTION_ID frame containing a sequence number
+ // greater than any previously sent to the peer MUST be treated as a
+ // connection error of type PROTOCOL_VIOLATION."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-19.16-7
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameRetireConnectionID{
+ seq: 2,
+ })
+ tc.wantFrame("invalid RETIRE_CONNECTION_ID: sequence number was never issued",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errProtocolViolation,
+ })
+}
+
+func TestConnIDUsePreferredAddressConnID(t *testing.T) {
+ // Peer gives us a connection ID in the preferred address transport parameter.
+ // We don't use the preferred address at this time, but we should use the
+ // connection ID. (It isn't tied to any specific address.)
+ //
+ // This test will probably need updating if/when we start using the preferred address.
+ cid := testPeerConnID(10)
+ tc := newTestConn(t, serverSide, func(p *transportParameters) {
+ p.preferredAddrV4 = netip.MustParseAddrPort("0.0.0.0:0")
+ p.preferredAddrV6 = netip.MustParseAddrPort("[::0]:0")
+ p.preferredAddrConnID = cid
+ p.preferredAddrResetToken = make([]byte, 16)
+ })
+ tc.uncheckedHandshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: []byte{0xff},
+ })
+ tc.wantFrame("peer asked for conn id 0 to be retired",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ if got, want := tc.lastPacket.dstConnID, cid; !bytes.Equal(got, want) {
+ t.Fatalf("used destination conn id {%x}, want {%x} from preferred address transport parameter", got, want)
+ }
+}
+
+func TestConnIDPeerProvidesPreferredAddrAndTooManyConnIDs(t *testing.T) {
+ // Peer gives us more conn ids than our advertised limit,
+ // including a conn id in the preferred address transport parameter.
+ cid := testPeerConnID(10)
+ tc := newTestConn(t, serverSide, func(p *transportParameters) {
+ p.preferredAddrV4 = netip.MustParseAddrPort("0.0.0.0:0")
+ p.preferredAddrV6 = netip.MustParseAddrPort("[::0]:0")
+ p.preferredAddrConnID = cid
+ p.preferredAddrResetToken = make([]byte, 16)
+ })
+ tc.uncheckedHandshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 0,
+ connID: testPeerConnID(2),
+ })
+ tc.wantFrame("peer provided 3 connection IDs, our limit is 2",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errConnectionIDLimit,
+ })
+}
+
+func TestConnIDPeerWithZeroLengthIDProvidesPreferredAddr(t *testing.T) {
+ // Peer selects a zero-length connection ID, but still provides
+ // a connection ID in the preferred address transport parameter.
+ tc := newTestConn(t, serverSide, func(p *transportParameters) {
+ p.initialSrcConnID = []byte{}
+ p.preferredAddrV4 = netip.MustParseAddrPort("0.0.0.0:0")
+ p.preferredAddrV6 = netip.MustParseAddrPort("[::0]:0")
+ p.preferredAddrConnID = testPeerConnID(1)
+ p.preferredAddrResetToken = make([]byte, 16)
+ }, func(cids *newServerConnIDs) {
+ cids.srcConnID = []byte{}
+ }, func(tc *testConn) {
+ tc.peerConnID = []byte{}
+ })
+
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ tc.wantFrame("peer with zero-length connection ID tried to provide another in transport parameters",
+ packetTypeInitial, debugFrameConnectionCloseTransport{
+ code: errProtocolViolation,
+ })
+}
+
+func TestConnIDInitialSrcConnIDMismatch(t *testing.T) {
+ // "Endpoints MUST validate that received [initial_source_connection_id]
+ // parameters match received connection ID values."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-7.3-3
+ testSides(t, "", func(t *testing.T, side connSide) {
+ tc := newTestConn(t, side, func(p *transportParameters) {
+ p.initialSrcConnID = []byte("invalid")
+ })
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeCrypto)
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ if side == clientSide {
+ // Server transport parameters are carried in the Handshake packet.
+ tc.writeFrames(packetTypeHandshake,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake],
+ })
+ }
+ tc.wantFrame("initial_source_connection_id transport parameter mismatch",
+ packetTypeInitial, debugFrameConnectionCloseTransport{
+ code: errTransportParameter,
+ })
+ })
+}
+
+func TestConnIDsCleanedUpAfterClose(t *testing.T) {
+ testSides(t, "", func(t *testing.T, side connSide) {
+ tc := newTestConn(t, side, func(p *transportParameters) {
+ if side == clientSide {
+ token := testPeerStatelessResetToken(0)
+ p.statelessResetToken = token[:]
+ }
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: testPeerConnID(2),
+ token: testPeerStatelessResetToken(0),
+ })
+ tc.wantFrame("peer asked for conn id 0 to be retired",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ tc.writeFrames(packetType1RTT, debugFrameConnectionCloseTransport{})
+ tc.conn.Abort(nil)
+ tc.wantFrame("CONN_CLOSE sent after user closes connection",
+ packetType1RTT, debugFrameConnectionCloseTransport{})
+
+ // Wait for the conn to drain.
+ // Then wait for the conn loop to exit,
+ // and force an immediate sync of the connsMap updates
+ // (normally only done by the endpoint read loop).
+ tc.advanceToTimer()
+ <-tc.conn.donec
+ tc.endpoint.e.connsMap.applyUpdates()
+
+ if got := len(tc.endpoint.e.connsMap.byConnID); got != 0 {
+ t.Errorf("%v conn ids in endpoint map after closing, want 0", got)
+ }
+ if got := len(tc.endpoint.e.connsMap.byResetToken); got != 0 {
+ t.Errorf("%v reset tokens in endpoint map after closing, want 0", got)
+ }
+ })
+}
diff --git a/quic/conn_loss.go b/quic/conn_loss.go
new file mode 100644
index 000000000..623ebdd7c
--- /dev/null
+++ b/quic/conn_loss.go
@@ -0,0 +1,87 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import "fmt"
+
+// handleAckOrLoss deals with the final fate of a packet we sent:
+// Either the peer acknowledges it, or we declare it lost.
+//
+// In order to handle packet loss, we must retain any information sent to the peer
+// until the peer has acknowledged it.
+//
+// When information is acknowledged, we can discard it.
+//
+// When information is lost, we mark it for retransmission.
+// See RFC 9000, Section 13.3 for a complete list of information which is retransmitted on loss.
+// https://www.rfc-editor.org/rfc/rfc9000#section-13.3
+func (c *Conn) handleAckOrLoss(space numberSpace, sent *sentPacket, fate packetFate) {
+ if fate == packetLost && c.logEnabled(QLogLevelPacket) {
+ c.logPacketLost(space, sent)
+ }
+
+ // The list of frames in a sent packet is marshaled into a buffer in the sentPacket
+ // by the packetWriter. Unmarshal that buffer here. This code must be kept in sync with
+ // packetWriter.append*.
+ //
+ // A sent packet meets its fate (acked or lost) only once, so it's okay to consume
+ // the sentPacket's buffer here.
+ for !sent.done() {
+ switch f := sent.next(); f {
+ default:
+ panic(fmt.Sprintf("BUG: unhandled acked/lost frame type %x", f))
+ case frameTypeAck:
+ // Unlike most information, loss of an ACK frame does not trigger
+ // retransmission. ACKs are sent in response to ack-eliciting packets,
+ // and always contain the latest information available.
+ //
+ // Acknowledgement of an ACK frame may allow us to discard information
+ // about older packets.
+ largest := packetNumber(sent.nextInt())
+ if fate == packetAcked {
+ c.acks[space].handleAck(largest)
+ }
+ case frameTypeCrypto:
+ start, end := sent.nextRange()
+ c.crypto[space].ackOrLoss(start, end, fate)
+ case frameTypeMaxData:
+ c.ackOrLossMaxData(sent.num, fate)
+ case frameTypeResetStream,
+ frameTypeStopSending,
+ frameTypeMaxStreamData,
+ frameTypeStreamDataBlocked:
+ id := streamID(sent.nextInt())
+ s := c.streamForID(id)
+ if s == nil {
+ continue
+ }
+ s.ackOrLoss(sent.num, f, fate)
+ case frameTypeStreamBase,
+ frameTypeStreamBase | streamFinBit:
+ id := streamID(sent.nextInt())
+ start, end := sent.nextRange()
+ s := c.streamForID(id)
+ if s == nil {
+ continue
+ }
+ fin := f&streamFinBit != 0
+ s.ackOrLossData(sent.num, start, end, fin, fate)
+ case frameTypeMaxStreamsBidi:
+ c.streams.remoteLimit[bidiStream].sendMax.ackLatestOrLoss(sent.num, fate)
+ case frameTypeMaxStreamsUni:
+ c.streams.remoteLimit[uniStream].sendMax.ackLatestOrLoss(sent.num, fate)
+ case frameTypeNewConnectionID:
+ seq := int64(sent.nextInt())
+ c.connIDState.ackOrLossNewConnectionID(sent.num, seq, fate)
+ case frameTypeRetireConnectionID:
+ seq := int64(sent.nextInt())
+ c.connIDState.ackOrLossRetireConnectionID(sent.num, seq, fate)
+ case frameTypeHandshakeDone:
+ c.handshakeConfirmed.ackOrLoss(sent.num, fate)
+ }
+ }
+}
diff --git a/quic/conn_loss_test.go b/quic/conn_loss_test.go
new file mode 100644
index 000000000..81d537803
--- /dev/null
+++ b/quic/conn_loss_test.go
@@ -0,0 +1,726 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "testing"
+)
+
+// Frames may be retransmitted either when the packet containing the frame is lost, or on PTO.
+// lostFrameTest runs a test in both configurations.
+func lostFrameTest(t *testing.T, f func(t *testing.T, pto bool)) {
+ t.Run("lost", func(t *testing.T) {
+ f(t, false)
+ })
+ t.Run("pto", func(t *testing.T) {
+ f(t, true)
+ })
+}
+
+// triggerLossOrPTO causes the conn to declare the last sent packet lost,
+// or advances to the PTO timer.
+func (tc *testConn) triggerLossOrPTO(ptype packetType, pto bool) {
+ tc.t.Helper()
+ if pto {
+ if !tc.conn.loss.ptoTimerArmed {
+ tc.t.Fatalf("PTO timer not armed, expected it to be")
+ }
+ if *testVV {
+ tc.t.Logf("advancing to PTO timer")
+ }
+ tc.advanceTo(tc.conn.loss.timer)
+ return
+ }
+ if *testVV {
+ *testVV = false
+ defer func() {
+ tc.t.Logf("cause conn to declare last packet lost")
+ *testVV = true
+ }()
+ }
+ defer func(ignoreFrames map[byte]bool) {
+ tc.ignoreFrames = ignoreFrames
+ }(tc.ignoreFrames)
+ tc.ignoreFrames = map[byte]bool{
+ frameTypeAck: true,
+ frameTypePadding: true,
+ }
+ // Send three packets containing PINGs, and then respond with an ACK for the
+ // last one. This puts the last packet before the PINGs outside the packet
+ // reordering threshold, and it will be declared lost.
+ const lossThreshold = 3
+ var num packetNumber
+ for i := 0; i < lossThreshold; i++ {
+ tc.conn.ping(spaceForPacketType(ptype))
+ d := tc.readDatagram()
+ if d == nil {
+ tc.t.Fatalf("conn is idle; want PING frame")
+ }
+ if d.packets[0].ptype != ptype {
+ tc.t.Fatalf("conn sent %v packet; want %v", d.packets[0].ptype, ptype)
+ }
+ num = d.packets[0].num
+ }
+ tc.writeFrames(ptype, debugFrameAck{
+ ranges: []i64range[packetNumber]{
+ {num, num + 1},
+ },
+ })
+}
+
+func TestLostResetStreamFrame(t *testing.T) {
+ // "Cancellation of stream transmission, as carried in a RESET_STREAM frame,
+ // is sent until acknowledged or until all stream data is acknowledged by the peer [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.3-3.4
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc, s := newTestConnAndLocalStream(t, serverSide, uniStream, permissiveTransportParameters)
+ tc.ignoreFrame(frameTypeAck)
+
+ s.Reset(1)
+ tc.wantFrame("reset stream",
+ packetType1RTT, debugFrameResetStream{
+ id: s.id,
+ code: 1,
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resent RESET_STREAM frame",
+ packetType1RTT, debugFrameResetStream{
+ id: s.id,
+ code: 1,
+ })
+ })
+}
+
+func TestLostStopSendingFrame(t *testing.T) {
+ // "[...] a request to cancel stream transmission, as encoded in a STOP_SENDING frame,
+ // is sent until the receiving part of the stream enters either a "Data Recvd" or
+ // "Reset Recvd" state [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.3-3.5
+ //
+ // Technically, we can stop sending a STOP_SENDING frame if the peer sends
+ // us all the data for the stream or resets it. We don't bother tracking this,
+ // however, so we'll keep sending the frame until it is acked. This is harmless.
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc, s := newTestConnAndRemoteStream(t, serverSide, uniStream, permissiveTransportParameters)
+ tc.ignoreFrame(frameTypeAck)
+
+ s.CloseRead()
+ tc.wantFrame("stream is read-closed",
+ packetType1RTT, debugFrameStopSending{
+ id: s.id,
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resent STOP_SENDING frame",
+ packetType1RTT, debugFrameStopSending{
+ id: s.id,
+ })
+ })
+}
+
+func TestLostCryptoFrame(t *testing.T) {
+ // "Data sent in CRYPTO frames is retransmitted [...] until all data has been acknowledged."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.3-3.1
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc := newTestConn(t, clientSide)
+ tc.ignoreFrame(frameTypeAck)
+
+ tc.wantFrame("client sends Initial CRYPTO frame",
+ packetTypeInitial, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelInitial],
+ })
+ tc.triggerLossOrPTO(packetTypeInitial, pto)
+ tc.wantFrame("client resends Initial CRYPTO frame",
+ packetTypeInitial, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelInitial],
+ })
+
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ tc.writeFrames(packetTypeHandshake,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake],
+ })
+
+ tc.wantFrame("client sends Handshake CRYPTO frame",
+ packetTypeHandshake, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelHandshake],
+ })
+ tc.wantFrame("client provides server with an additional connection ID",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 1,
+ connID: testLocalConnID(1),
+ token: testLocalStatelessResetToken(1),
+ })
+ tc.triggerLossOrPTO(packetTypeHandshake, pto)
+ tc.wantFrame("client resends Handshake CRYPTO frame",
+ packetTypeHandshake, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelHandshake],
+ })
+ })
+}
+
+func TestLostStreamFrameEmpty(t *testing.T) {
+ // A STREAM frame opening a stream, but containing no stream data, should
+ // be retransmitted if lost.
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ ctx := canceledContext()
+ tc := newTestConn(t, clientSide, permissiveTransportParameters)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ c, err := tc.conn.NewStream(ctx)
+ if err != nil {
+ t.Fatalf("NewStream: %v", err)
+ }
+ c.Flush() // open the stream
+ tc.wantFrame("created bidirectional stream 0",
+ packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 0),
+ data: []byte{},
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resent stream frame",
+ packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 0),
+ data: []byte{},
+ })
+ })
+}
+
+func TestLostStreamWithData(t *testing.T) {
+ // "Application data sent in STREAM frames is retransmitted in new STREAM
+ // frames unless the endpoint has sent a RESET_STREAM for that stream."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.2
+ //
+ // TODO: Lost stream frame after RESET_STREAM
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ data := []byte{0, 1, 2, 3, 4, 5, 6, 7}
+ tc, s := newTestConnAndLocalStream(t, serverSide, uniStream, func(p *transportParameters) {
+ p.initialMaxStreamsUni = 1
+ p.initialMaxData = 1 << 20
+ p.initialMaxStreamDataUni = 1 << 20
+ })
+ s.Write(data[:4])
+ s.Flush()
+ tc.wantFrame("send [0,4)",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: data[:4],
+ })
+ s.Write(data[4:8])
+ s.Flush()
+ tc.wantFrame("send [4,8)",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 4,
+ data: data[4:8],
+ })
+ s.CloseWrite()
+ tc.wantFrame("send FIN",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 8,
+ fin: true,
+ data: []byte{},
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resend data",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ fin: true,
+ data: data[:8],
+ })
+ })
+}
+
+func TestLostStreamPartialLoss(t *testing.T) {
+ // Conn sends four STREAM packets.
+ // ACKs are received for the packets containing bytes 0 and 2.
+ // The remaining packets are declared lost.
+ // The Conn resends only the lost data.
+ //
+ // This test doesn't have a PTO mode, because the ACK for the packet containing byte 2
+ // starts the loss timer for the packet containing byte 1, and the PTO timer is not
+ // armed when the loss timer is.
+ data := []byte{0, 1, 2, 3}
+ tc, s := newTestConnAndLocalStream(t, serverSide, uniStream, func(p *transportParameters) {
+ p.initialMaxStreamsUni = 1
+ p.initialMaxData = 1 << 20
+ p.initialMaxStreamDataUni = 1 << 20
+ })
+ for i := range data {
+ s.Write(data[i : i+1])
+ s.Flush()
+ tc.wantFrame(fmt.Sprintf("send STREAM frame with byte %v", i),
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: int64(i),
+ data: data[i : i+1],
+ })
+ // ACK only the packets carrying the even-numbered bytes (0 and 2),
+ // leaving the packets with bytes 1 and 3 unacknowledged.
+ if i%2 == 0 {
+ tc.writeAckForLatest()
+ }
+ }
+ const pto = false
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resend byte 1",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 1,
+ data: data[1:2],
+ })
+ tc.wantFrame("resend byte 3",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 3,
+ data: data[3:4],
+ })
+ tc.wantIdle("no more frames sent after packet loss")
+}
+
+func TestLostMaxDataFrame(t *testing.T) {
+ // "An updated value is sent in a MAX_DATA frame if the packet
+ // containing the most recently sent MAX_DATA frame is declared lost [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.7
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ const maxWindowSize = 32
+ buf := make([]byte, maxWindowSize)
+ // Use maxWindowSize for the conn-level read buffer (rather than a
+ // duplicated literal) so the window arithmetic below stays in sync
+ // with the configured limit.
+ tc, s := newTestConnAndRemoteStream(t, serverSide, uniStream, func(c *Config) {
+ c.MaxConnReadBufferSize = maxWindowSize
+ })
+
+ // We send MAX_DATA = 63.
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: make([]byte, maxWindowSize-1),
+ })
+ if n, err := s.Read(buf[:maxWindowSize]); err != nil || n != maxWindowSize-1 {
+ t.Fatalf("Read() = %v, %v; want %v, nil", n, err, maxWindowSize-1)
+ }
+ tc.wantFrame("conn window is extended after reading data",
+ packetType1RTT, debugFrameMaxData{
+ max: (maxWindowSize * 2) - 1,
+ })
+
+ // MAX_DATA = 64, which is only one more byte, so we don't send the frame.
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: maxWindowSize - 1,
+ data: make([]byte, 1),
+ })
+ if n, err := s.Read(buf[:1]); err != nil || n != 1 {
+ t.Fatalf("Read() = %v, %v; want %v, nil", n, err, 1)
+ }
+ tc.wantIdle("read doesn't extend window enough to send another MAX_DATA")
+
+ // The MAX_DATA = 63 packet was lost, so we send 64.
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resent MAX_DATA includes most current value",
+ packetType1RTT, debugFrameMaxData{
+ max: maxWindowSize * 2,
+ })
+ })
+}
+
+func TestLostMaxStreamDataFrame(t *testing.T) {
+ // "[...] an updated value is sent when the packet containing
+ // the most recent MAX_STREAM_DATA frame for a stream is lost"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.8
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ const maxWindowSize = 32
+ buf := make([]byte, maxWindowSize)
+ tc, s := newTestConnAndRemoteStream(t, serverSide, uniStream, func(c *Config) {
+ c.MaxStreamReadBufferSize = maxWindowSize
+ })
+
+ // We send MAX_STREAM_DATA = 63.
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: make([]byte, maxWindowSize-1),
+ })
+ if n, err := s.Read(buf[:maxWindowSize]); err != nil || n != maxWindowSize-1 {
+ t.Fatalf("Read() = %v, %v; want %v, nil", n, err, maxWindowSize-1)
+ }
+ tc.wantFrame("stream window is extended after reading data",
+ packetType1RTT, debugFrameMaxStreamData{
+ id: s.id,
+ max: (maxWindowSize * 2) - 1,
+ })
+
+ // MAX_STREAM_DATA = 64, which is only one more byte, so we don't send the frame.
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: maxWindowSize - 1,
+ data: make([]byte, 1),
+ })
+ // Only one new byte has arrived, so Read returns 1 even though buf is larger.
+ if n, err := s.Read(buf); err != nil || n != 1 {
+ t.Fatalf("Read() = %v, %v; want %v, nil", n, err, 1)
+ }
+ tc.wantIdle("read doesn't extend window enough to send another MAX_STREAM_DATA")
+
+ // The MAX_STREAM_DATA = 63 packet was lost, so we send 64.
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resent MAX_STREAM_DATA includes most current value",
+ packetType1RTT, debugFrameMaxStreamData{
+ id: s.id,
+ max: maxWindowSize * 2,
+ })
+ })
+}
+
+func TestLostMaxStreamDataFrameAfterStreamFinReceived(t *testing.T) {
+ // "An endpoint SHOULD stop sending MAX_STREAM_DATA frames when
+ // the receiving part of the stream enters a "Size Known" or "Reset Recvd" state."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.8
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ const maxWindowSize = 10
+ buf := make([]byte, maxWindowSize)
+ tc, s := newTestConnAndRemoteStream(t, serverSide, uniStream, func(c *Config) {
+ c.MaxStreamReadBufferSize = maxWindowSize
+ })
+
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: make([]byte, maxWindowSize),
+ })
+ if n, err := s.Read(buf); err != nil || n != maxWindowSize {
+ t.Fatalf("Read() = %v, %v; want %v, nil", n, err, maxWindowSize)
+ }
+ tc.wantFrame("stream window is extended after reading data",
+ packetType1RTT, debugFrameMaxStreamData{
+ id: s.id,
+ max: 2 * maxWindowSize,
+ })
+
+ // A FIN at the final offset moves the receiving stream to "Size Known".
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: maxWindowSize,
+ fin: true,
+ })
+
+ tc.ignoreFrame(frameTypePing)
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantIdle("lost MAX_STREAM_DATA not resent for stream in 'size known'")
+ })
+}
+
+func TestLostMaxStreamsFrameMostRecent(t *testing.T) {
+ // "[...] an updated value is sent when a packet containing the
+ // most recent MAX_STREAMS for a stream type frame is declared lost [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.9
+ testStreamTypes(t, "", func(t *testing.T, styp streamType) {
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ ctx := canceledContext()
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxUniRemoteStreams = 1
+ c.MaxBidiRemoteStreams = 1
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, styp, 0),
+ fin: true,
+ })
+ s, err := tc.conn.AcceptStream(ctx)
+ if err != nil {
+ t.Fatalf("AcceptStream() = %v", err)
+ }
+ s.SetWriteContext(ctx)
+ // Closing the accepted stream frees a stream slot, so the conn
+ // raises the peer's limit from 1 to 2 via MAX_STREAMS.
+ s.Close()
+ if styp == bidiStream {
+ tc.wantFrame("stream is closed",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ data: []byte{},
+ fin: true,
+ })
+ tc.writeAckForAll()
+ }
+ tc.wantFrame("closing stream updates peer's MAX_STREAMS",
+ packetType1RTT, debugFrameMaxStreams{
+ streamType: styp,
+ max: 2,
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("lost MAX_STREAMS is resent",
+ packetType1RTT, debugFrameMaxStreams{
+ streamType: styp,
+ max: 2,
+ })
+ })
+ })
+}
+
+func TestLostMaxStreamsFrameNotMostRecent(t *testing.T) {
+ // Send two MAX_STREAMS frames, lose the first one.
+ //
+ // No PTO mode for this test: The ack that causes the first frame
+ // to be lost arms the loss timer for the second, so the PTO timer is not armed.
+ const pto = false
+ ctx := canceledContext()
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxUniRemoteStreams = 2
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ for i := int64(0); i < 2; i++ {
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, uniStream, i),
+ fin: true,
+ })
+ s, err := tc.conn.AcceptStream(ctx)
+ if err != nil {
+ t.Fatalf("AcceptStream() = %v", err)
+ }
+ if err := s.Close(); err != nil {
+ t.Fatalf("stream.Close() = %v", err)
+ }
+ tc.wantFrame("closing stream updates peer's MAX_STREAMS",
+ packetType1RTT, debugFrameMaxStreams{
+ streamType: uniStream,
+ max: 3 + i,
+ })
+ }
+
+ // The second MAX_STREAMS frame is acked.
+ tc.writeAckForLatest()
+
+ // The first MAX_STREAMS frame is lost.
+ tc.conn.ping(appDataSpace)
+ tc.wantFrame("connection should send a PING frame",
+ packetType1RTT, debugFramePing{})
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ // The lost frame's value was superseded by the acked MAX_STREAMS,
+ // so nothing needs retransmission. (Message fixed: this test covers
+ // MAX_STREAMS, not MAX_DATA.)
+ tc.wantIdle("superseded MAX_STREAMS is not resent on loss")
+}
+
+func TestLostStreamDataBlockedFrame(t *testing.T) {
+ // "A new [STREAM_DATA_BLOCKED] frame is sent if a packet containing
+ // the most recent frame for a scope is lost [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.10
+ //
+ // The write blocks at limit 0, then advances to limit 1 and blocks again;
+ // the packet carrying STREAM_DATA_BLOCKED{max: 1} is then declared lost.
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc, s := newTestConnAndLocalStream(t, serverSide, uniStream, func(p *transportParameters) {
+ p.initialMaxStreamsUni = 1
+ p.initialMaxData = 1 << 20
+ })
+
+ w := runAsync(tc, func(ctx context.Context) (int, error) {
+ return s.Write([]byte{0, 1, 2, 3})
+ })
+ defer w.cancel()
+ tc.wantFrame("write is blocked by flow control",
+ packetType1RTT, debugFrameStreamDataBlocked{
+ id: s.id,
+ max: 0,
+ })
+
+ tc.writeFrames(packetType1RTT, debugFrameMaxStreamData{
+ id: s.id,
+ max: 1,
+ })
+ tc.wantFrame("write makes some progress, but is still blocked by flow control",
+ packetType1RTT, debugFrameStreamDataBlocked{
+ id: s.id,
+ max: 1,
+ })
+ tc.wantFrame("write consuming available window",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: []byte{0},
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("STREAM_DATA_BLOCKED is resent",
+ packetType1RTT, debugFrameStreamDataBlocked{
+ id: s.id,
+ max: 1,
+ })
+ tc.wantFrame("STREAM is resent as well",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: []byte{0},
+ })
+ })
+}
+
+func TestLostStreamDataBlockedFrameAfterStreamUnblocked(t *testing.T) {
+ // "A new [STREAM_DATA_BLOCKED] frame is sent [...] only while
+ // the endpoint is blocked on the corresponding limit."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.10
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc, s := newTestConnAndLocalStream(t, serverSide, uniStream, func(p *transportParameters) {
+ p.initialMaxStreamsUni = 1
+ p.initialMaxData = 1 << 20
+ })
+
+ data := []byte{0, 1, 2, 3}
+ w := runAsync(tc, func(ctx context.Context) (int, error) {
+ return s.Write(data)
+ })
+ defer w.cancel()
+ tc.wantFrame("write is blocked by flow control",
+ packetType1RTT, debugFrameStreamDataBlocked{
+ id: s.id,
+ max: 0,
+ })
+
+ // Raising the stream's flow control limit to 10 unblocks
+ // the entire 4-byte write.
+ tc.writeFrames(packetType1RTT, debugFrameMaxStreamData{
+ id: s.id,
+ max: 10,
+ })
+ tc.wantFrame("write completes after flow control available",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: data,
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("STREAM data is resent",
+ packetType1RTT, debugFrameStream{
+ id: s.id,
+ off: 0,
+ data: data,
+ })
+ tc.wantIdle("STREAM_DATA_BLOCKED is not resent, since the stream is not blocked")
+ })
+}
+
+func TestLostNewConnectionIDFrame(t *testing.T) {
+ // "New connection IDs are [...] retransmitted if the packet containing them is lost."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.13
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ // The peer retires connection ID 1; the conn responds by issuing a
+ // replacement (seq 2) with its stateless reset token.
+ tc.writeFrames(packetType1RTT,
+ debugFrameRetireConnectionID{
+ seq: 1,
+ })
+ tc.wantFrame("provide a new connection ID after peer retires old one",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 2,
+ connID: testLocalConnID(2),
+ token: testLocalStatelessResetToken(2),
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resend new connection ID",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 2,
+ connID: testLocalConnID(2),
+ token: testLocalStatelessResetToken(2),
+ })
+ })
+}
+
+func TestLostRetireConnectionIDFrame(t *testing.T) {
+ // "[...] retired connection IDs are [...] retransmitted
+ // if the packet containing them is lost."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.3-3.13
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ // The peer's NEW_CONNECTION_ID with retirePriorTo=1 obligates us to
+ // retire connection ID 0.
+ tc.writeFrames(packetType1RTT,
+ debugFrameNewConnectionID{
+ seq: 2,
+ retirePriorTo: 1,
+ connID: testPeerConnID(2),
+ })
+ tc.wantFrame("peer requested connection id be retired",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("resend RETIRE_CONNECTION_ID",
+ packetType1RTT, debugFrameRetireConnectionID{
+ seq: 0,
+ })
+ })
+}
+
+func TestLostPathResponseFrame(t *testing.T) {
+ // "Responses to path validation using PATH_RESPONSE frames are sent just once."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.3-3.12
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypePing)
+
+ // The peer challenges the path; we answer with a PATH_RESPONSE
+ // echoing the challenge data — exactly once, even under loss.
+ data := pathChallengeData{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}
+ tc.writeFrames(packetType1RTT, debugFramePathChallenge{
+ data: data,
+ })
+ tc.wantFrame("response to PATH_CHALLENGE",
+ packetType1RTT, debugFramePathResponse{
+ data: data,
+ })
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantIdle("lost PATH_RESPONSE frame is not retransmitted")
+ })
+}
+
+func TestLostHandshakeDoneFrame(t *testing.T) {
+ // "The HANDSHAKE_DONE frame MUST be retransmitted until it is acknowledged."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-13.3-3.16
+ lostFrameTest(t, func(t *testing.T, pto bool) {
+ tc := newTestConn(t, serverSide)
+ tc.ignoreFrame(frameTypeAck)
+
+ // Drive the handshake by hand: deliver the client's Initial and
+ // Handshake CRYPTO data, checking the server's responses.
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ tc.wantFrame("server sends Initial CRYPTO frame",
+ packetTypeInitial, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelInitial],
+ })
+ tc.wantFrame("server sends Handshake CRYPTO frame",
+ packetTypeHandshake, debugFrameCrypto{
+ data: tc.cryptoDataOut[tls.QUICEncryptionLevelHandshake],
+ })
+ tc.wantFrame("server provides an additional connection ID",
+ packetType1RTT, debugFrameNewConnectionID{
+ seq: 1,
+ connID: testLocalConnID(1),
+ token: testLocalStatelessResetToken(1),
+ })
+ tc.writeFrames(packetTypeHandshake,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake],
+ })
+
+ tc.wantFrame("server sends HANDSHAKE_DONE after handshake completes",
+ packetType1RTT, debugFrameHandshakeDone{})
+
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("server resends HANDSHAKE_DONE",
+ packetType1RTT, debugFrameHandshakeDone{})
+ })
+}
diff --git a/quic/conn_recv.go b/quic/conn_recv.go
new file mode 100644
index 000000000..b1354cd3a
--- /dev/null
+++ b/quic/conn_recv.go
@@ -0,0 +1,620 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "time"
+)
+
+// handleDatagram processes every packet in a received datagram.
+// It reports whether the datagram was handled; false means it was dropped
+// (unknown peer address, draining state, too-short client Initial, or an
+// unparseable first packet that is not a valid stateless reset).
+func (c *Conn) handleDatagram(now time.Time, dgram *datagram) (handled bool) {
+ if !c.localAddr.IsValid() {
+ // We don't have any way to tell in the general case what address we're
+ // sending packets from. Set our address from the destination address of
+ // the first packet received from the peer.
+ c.localAddr = dgram.localAddr
+ }
+ if dgram.peerAddr.IsValid() && dgram.peerAddr != c.peerAddr {
+ if c.side == clientSide {
+ // "If a client receives packets from an unknown server address,
+ // the client MUST discard these packets."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-9-6
+ return false
+ }
+ // We currently don't support connection migration,
+ // so for now the server also drops packets from an unknown address.
+ return false
+ }
+ buf := dgram.b
+ c.loss.datagramReceived(now, len(buf))
+ if c.isDraining() {
+ return false
+ }
+ // A datagram may coalesce several packets; each handler returns how many
+ // bytes it consumed, and we advance through buf packet by packet.
+ for len(buf) > 0 {
+ var n int
+ ptype := getPacketType(buf)
+ switch ptype {
+ case packetTypeInitial:
+ if c.side == serverSide && len(dgram.b) < paddedInitialDatagramSize {
+ // Discard client-sent Initial packets in too-short datagrams.
+ // https://www.rfc-editor.org/rfc/rfc9000#section-14.1-4
+ return false
+ }
+ n = c.handleLongHeader(now, dgram, ptype, initialSpace, c.keysInitial.r, buf)
+ case packetTypeHandshake:
+ n = c.handleLongHeader(now, dgram, ptype, handshakeSpace, c.keysHandshake.r, buf)
+ case packetType1RTT:
+ n = c.handle1RTT(now, dgram, buf)
+ case packetTypeRetry:
+ c.handleRetry(now, buf)
+ return true
+ case packetTypeVersionNegotiation:
+ c.handleVersionNegotiation(now, buf)
+ return true
+ default:
+ n = -1
+ }
+ if n <= 0 {
+ // We don't expect to get a stateless reset with a valid
+ // destination connection ID, since the sender of a stateless
+ // reset doesn't know what the connection ID is.
+ //
+ // We're required to perform this check anyway.
+ //
+ // "[...] the comparison MUST be performed when the first packet
+ // in an incoming datagram [...] cannot be decrypted."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-10.3.1-2
+ // (len(buf) == len(dgram.b) means this is the first packet.)
+ if len(buf) == len(dgram.b) && len(buf) > statelessResetTokenLen {
+ var token statelessResetToken
+ copy(token[:], buf[len(buf)-len(token):])
+ if c.handleStatelessReset(now, token) {
+ return true
+ }
+ }
+ // Invalid data at the end of a datagram is ignored.
+ return false
+ }
+ c.idleHandlePacketReceived(now)
+ buf = buf[n:]
+ }
+ return true
+}
+
+// handleLongHeader processes a long-header (Initial or Handshake) packet at
+// the start of buf, using read keys k for number space space. It returns the
+// number of bytes consumed from buf, or -1 if the packet cannot be parsed or
+// aborts the connection. If keys for the space are not yet available, the
+// packet is skipped without processing.
+func (c *Conn) handleLongHeader(now time.Time, dgram *datagram, ptype packetType, space numberSpace, k fixedKeys, buf []byte) int {
+ if !k.isSet() {
+ return skipLongHeaderPacket(buf)
+ }
+
+ pnumMax := c.acks[space].largestSeen()
+ p, n := parseLongHeaderPacket(buf, k, pnumMax)
+ if n < 0 {
+ return -1
+ }
+ if buf[0]&reservedLongBits != 0 {
+ // Reserved header bits must be 0.
+ // https://www.rfc-editor.org/rfc/rfc9000#section-17.2-8.2.1
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "reserved header bits are not zero",
+ })
+ return -1
+ }
+ if p.version != quicVersion1 {
+ // The peer has changed versions on us mid-handshake?
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "protocol version changed during handshake",
+ })
+ return -1
+ }
+
+ // Skip packets we have already seen or chosen not to process.
+ if !c.acks[space].shouldProcess(p.num) {
+ return n
+ }
+
+ if logPackets {
+ logInboundLongPacket(c, p)
+ }
+ if c.logEnabled(QLogLevelPacket) {
+ c.logLongPacketReceived(p, buf[:n])
+ }
+ c.connIDState.handlePacket(c, p.ptype, p.srcConnID)
+ ackEliciting := c.handleFrames(now, dgram, ptype, space, p.payload)
+ c.acks[space].receive(now, space, p.num, ackEliciting)
+ if p.ptype == packetTypeHandshake && c.side == serverSide {
+ c.loss.validateClientAddress()
+
+ // "[...] a server MUST discard Initial keys when it first successfully
+ // processes a Handshake packet [...]"
+ // https://www.rfc-editor.org/rfc/rfc9001#section-4.9.1-2
+ c.discardKeys(now, initialSpace)
+ }
+ return n
+}
+
+// handle1RTT processes a 1-RTT (short header) packet. 1-RTT packets extend
+// to the end of the datagram, so this consumes all of buf on success or when
+// the packet is skipped; it returns -1 only for an unparseable packet.
+func (c *Conn) handle1RTT(now time.Time, dgram *datagram, buf []byte) int {
+ if !c.keysAppData.canRead() {
+ // 1-RTT packets extend to the end of the datagram,
+ // so skip the remainder of the datagram if we can't parse this.
+ return len(buf)
+ }
+
+ pnumMax := c.acks[appDataSpace].largestSeen()
+ p, err := parse1RTTPacket(buf, &c.keysAppData, connIDLen, pnumMax)
+ if err != nil {
+ // A localTransportError terminates the connection.
+ // Other errors indicate an unparseable packet, but otherwise may be ignored.
+ if _, ok := err.(localTransportError); ok {
+ c.abort(now, err)
+ }
+ return -1
+ }
+ if buf[0]&reserved1RTTBits != 0 {
+ // Reserved header bits must be 0.
+ // https://www.rfc-editor.org/rfc/rfc9000#section-17.3.1-4.8.1
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "reserved header bits are not zero",
+ })
+ return -1
+ }
+
+ // Skip packets we have already seen or chosen not to process.
+ if !c.acks[appDataSpace].shouldProcess(p.num) {
+ return len(buf)
+ }
+
+ if logPackets {
+ logInboundShortPacket(c, p)
+ }
+ if c.logEnabled(QLogLevelPacket) {
+ c.log1RTTPacketReceived(p, buf)
+ }
+ ackEliciting := c.handleFrames(now, dgram, packetType1RTT, appDataSpace, p.payload)
+ c.acks[appDataSpace].receive(now, appDataSpace, p.num, ackEliciting)
+ return len(buf)
+}
+
+// handleRetry processes a Retry packet on the client, applying the discard
+// rules of RFC 9000 section 17.2.5.2. On success it records the retry token,
+// adopts the server's new connection ID, and marks the already-sent Initial
+// data for retransmission with fresh packet numbers.
+func (c *Conn) handleRetry(now time.Time, pkt []byte) {
+ if c.side != clientSide {
+ return // clients don't send Retry packets
+ }
+ // "After the client has received and processed an Initial or Retry packet
+ // from the server, it MUST discard any subsequent Retry packets that it receives."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-17.2.5.2-1
+ if !c.keysInitial.canRead() {
+ return // discarded Initial keys, connection is already established
+ }
+ if c.acks[initialSpace].seen.numRanges() != 0 {
+ return // processed at least one packet
+ }
+ if c.retryToken != nil {
+ return // received a Retry already
+ }
+ // "Clients MUST discard Retry packets that have a Retry Integrity Tag
+ // that cannot be validated."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-17.2.5.2-2
+ p, ok := parseRetryPacket(pkt, c.connIDState.originalDstConnID)
+ if !ok {
+ return
+ }
+ // "A client MUST discard a Retry packet with a zero-length Retry Token field."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-17.2.5.2-2
+ if len(p.token) == 0 {
+ return
+ }
+ c.retryToken = cloneBytes(p.token)
+ c.connIDState.handleRetryPacket(p.srcConnID)
+ // We need to resend any data we've already sent in Initial packets.
+ // We must not reuse already sent packet numbers.
+ c.loss.discardPackets(initialSpace, c.log, c.handleAckOrLoss)
+ // TODO: Discard 0-RTT packets as well, once we support 0-RTT.
+}
+
+// errVersionNegotiation is the terminal error for a connection attempt
+// abandoned after receiving a valid Version Negotiation packet.
+var errVersionNegotiation = errors.New("server does not support QUIC version 1")
+
+// handleVersionNegotiation processes a Version Negotiation packet on the
+// client. We support only QUIC version 1, so a valid packet (one that does
+// not list version 1) abandons the connection attempt.
+func (c *Conn) handleVersionNegotiation(now time.Time, pkt []byte) {
+ if c.side != clientSide {
+ return // servers don't handle Version Negotiation packets
+ }
+ // "A client MUST discard any Version Negotiation packet if it has
+ // received and successfully processed any other packet [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-6.2-2
+ if !c.keysInitial.canRead() {
+ return // discarded Initial keys, connection is already established
+ }
+ if c.acks[initialSpace].seen.numRanges() != 0 {
+ return // processed at least one packet
+ }
+ _, srcConnID, versions := parseVersionNegotiation(pkt)
+ if len(c.connIDState.remote) < 1 || !bytes.Equal(c.connIDState.remote[0].cid, srcConnID) {
+ return // Source Connection ID doesn't match what we sent
+ }
+ // Each listed version is a 4-byte big-endian value.
+ for len(versions) >= 4 {
+ ver := binary.BigEndian.Uint32(versions)
+ if ver == 1 {
+ // "A client MUST discard a Version Negotiation packet that lists
+ // the QUIC version selected by the client."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-6.2-2
+ return
+ }
+ versions = versions[4:]
+ }
+ // "A client that supports only this version of QUIC MUST
+ // abandon the current connection attempt if it receives
+ // a Version Negotiation packet, [with the two exceptions handled above]."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-6.2-2
+ c.abortImmediately(now, errVersionNegotiation)
+}
+
+// handleFrames processes the frames in a packet payload, dispatching each to
+// its handler after verifying that the frame type is permitted in this packet
+// type (RFC 9000 Table 3). It reports whether the packet contained any
+// ack-eliciting frame. Frame handlers return the number of bytes consumed
+// from payload; a negative count (including the -1 left by an unknown frame
+// type) aborts the connection with FRAME_ENCODING_ERROR.
+func (c *Conn) handleFrames(now time.Time, dgram *datagram, ptype packetType, space numberSpace, payload []byte) (ackEliciting bool) {
+ if len(payload) == 0 {
+ // "An endpoint MUST treat receipt of a packet containing no frames
+ // as a connection error of type PROTOCOL_VIOLATION."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-12.4-3
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "packet contains no frames",
+ })
+ return false
+ }
+ // frameOK verifies that ptype is one of the packets in mask.
+ frameOK := func(c *Conn, ptype, mask packetType) (ok bool) {
+ if ptype&mask == 0 {
+ // "An endpoint MUST treat receipt of a frame in a packet type
+ // that is not permitted as a connection error of type
+ // PROTOCOL_VIOLATION."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-12.4-3
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "frame not allowed in packet",
+ })
+ return false
+ }
+ return true
+ }
+ // Packet masks from RFC 9000 Table 3.
+ // https://www.rfc-editor.org/rfc/rfc9000#table-3
+ const (
+ IH_1 = packetTypeInitial | packetTypeHandshake | packetType1RTT
+ __01 = packetType0RTT | packetType1RTT
+ ___1 = packetType1RTT
+ )
+ for len(payload) > 0 {
+ // All frame types except PADDING, ACK, and CONNECTION_CLOSE are
+ // ack-eliciting (RFC 9000 section 13.2).
+ switch payload[0] {
+ case frameTypePadding, frameTypeAck, frameTypeAckECN,
+ frameTypeConnectionCloseTransport, frameTypeConnectionCloseApplication:
+ default:
+ ackEliciting = true
+ }
+ n := -1
+ switch payload[0] {
+ case frameTypePadding:
+ // PADDING is OK in all spaces.
+ n = 1
+ case frameTypePing:
+ // PING is OK in all spaces.
+ //
+ // A PING frame causes us to respond with an ACK by virtue of being
+ // an ack-eliciting frame, but requires no other action.
+ n = 1
+ case frameTypeAck, frameTypeAckECN:
+ if !frameOK(c, ptype, IH_1) {
+ return
+ }
+ n = c.handleAckFrame(now, space, payload)
+ case frameTypeResetStream:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleResetStreamFrame(now, space, payload)
+ case frameTypeStopSending:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleStopSendingFrame(now, space, payload)
+ case frameTypeCrypto:
+ if !frameOK(c, ptype, IH_1) {
+ return
+ }
+ n = c.handleCryptoFrame(now, space, payload)
+ case frameTypeNewToken:
+ if !frameOK(c, ptype, ___1) {
+ return
+ }
+ _, n = consumeNewTokenFrame(payload)
+ case 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f: // STREAM
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleStreamFrame(now, space, payload)
+ case frameTypeMaxData:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleMaxDataFrame(now, payload)
+ case frameTypeMaxStreamData:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleMaxStreamDataFrame(now, payload)
+ case frameTypeMaxStreamsBidi, frameTypeMaxStreamsUni:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleMaxStreamsFrame(now, payload)
+ case frameTypeDataBlocked:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ _, n = consumeDataBlockedFrame(payload)
+ case frameTypeStreamsBlockedBidi, frameTypeStreamsBlockedUni:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ _, _, n = consumeStreamsBlockedFrame(payload)
+ case frameTypeStreamDataBlocked:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ _, _, n = consumeStreamDataBlockedFrame(payload)
+ case frameTypeNewConnectionID:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleNewConnectionIDFrame(now, space, payload)
+ case frameTypeRetireConnectionID:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleRetireConnectionIDFrame(now, space, payload)
+ case frameTypePathChallenge:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handlePathChallengeFrame(now, dgram, space, payload)
+ case frameTypePathResponse:
+ if !frameOK(c, ptype, ___1) {
+ return
+ }
+ n = c.handlePathResponseFrame(now, space, payload)
+ case frameTypeConnectionCloseTransport:
+ // Transport CONNECTION_CLOSE is OK in all spaces.
+ n = c.handleConnectionCloseTransportFrame(now, payload)
+ case frameTypeConnectionCloseApplication:
+ if !frameOK(c, ptype, __01) {
+ return
+ }
+ n = c.handleConnectionCloseApplicationFrame(now, payload)
+ case frameTypeHandshakeDone:
+ if !frameOK(c, ptype, ___1) {
+ return
+ }
+ n = c.handleHandshakeDoneFrame(now, space, payload)
+ }
+ if n < 0 {
+ c.abort(now, localTransportError{
+ code: errFrameEncoding,
+ reason: "frame encoding error",
+ })
+ return false
+ }
+ payload = payload[n:]
+ }
+ return ackEliciting
+}
+
+// handleAckFrame processes an ACK frame, feeding each acknowledged packet
+// range to the loss detector and aborting the connection if the peer
+// acknowledges a packet we never sent. In the application data space, the
+// largest acknowledged packet also feeds key-update handling. It returns the
+// number of bytes consumed from payload.
+func (c *Conn) handleAckFrame(now time.Time, space numberSpace, payload []byte) int {
+ c.loss.receiveAckStart()
+ largest, ackDelay, n := consumeAckFrame(payload, func(rangeIndex int, start, end packetNumber) {
+ if end > c.loss.nextNumber(space) {
+ // Acknowledgement of a packet we never sent.
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "acknowledgement for unsent packet",
+ })
+ return
+ }
+ c.loss.receiveAckRange(now, space, rangeIndex, start, end, c.handleAckOrLoss)
+ })
+ // Prior to receiving the peer's transport parameters, we cannot
+ // interpret the ACK Delay field because we don't know the ack_delay_exponent
+ // to apply.
+ //
+ // For servers, we should always know the ack_delay_exponent because the
+ // client's transport parameters are carried in its Initial packets and we
+ // won't send an ack-eliciting Initial packet until after receiving the last
+ // client Initial packet.
+ //
+ // For clients, we won't receive the server's transport parameters until handling
+ // its Handshake flight, which will probably happen after reading its ACK for our
+ // Initial packet(s). However, the peer's acknowledgement delay cannot reduce our
+ // adjusted RTT sample below min_rtt, and min_rtt is generally going to be set
+ // by the packet containing the ACK for our Initial flight. Therefore, the
+ // ACK Delay for an ACK in the Initial space is likely to be ignored anyway.
+ //
+ // Long story short, setting the delay to 0 prior to reading transport parameters
+ // is usually going to have no effect, will have only a minor effect in the rare
+ // cases when it happens, and there aren't any good alternatives anyway since we
+ // can't interpret the ACK Delay field without knowing the exponent.
+ var delay time.Duration
+ if c.peerAckDelayExponent >= 0 {
+ delay = ackDelay.Duration(uint8(c.peerAckDelayExponent))
+ }
+ c.loss.receiveAckEnd(now, c.log, space, delay, c.handleAckOrLoss)
+ if space == appDataSpace {
+ c.keysAppData.handleAckFor(largest)
+ }
+ return n
+}
+
+// handleMaxDataFrame raises the connection-level limit on outgoing data.
+// It returns the number of bytes consumed from payload, or -1 on a
+// malformed frame.
+func (c *Conn) handleMaxDataFrame(now time.Time, payload []byte) int {
+ maxData, n := consumeMaxDataFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ c.streams.outflow.setMaxData(maxData)
+ return n
+}
+
+// handleMaxStreamDataFrame raises the flow control limit for a single send
+// stream. Frames for unknown streams are consumed without effect.
+func (c *Conn) handleMaxStreamDataFrame(now time.Time, payload []byte) int {
+ id, maxStreamData, n := consumeMaxStreamDataFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ if s := c.streamForFrame(now, id, sendStream); s != nil {
+ if err := s.handleMaxStreamData(maxStreamData); err != nil {
+ c.abort(now, err)
+ return -1
+ }
+ }
+ return n
+}
+
+// handleMaxStreamsFrame raises the limit on the number of streams of the
+// given type that we may open.
+func (c *Conn) handleMaxStreamsFrame(now time.Time, payload []byte) int {
+ styp, max, n := consumeMaxStreamsFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ c.streams.localLimit[styp].setMax(max)
+ return n
+}
+
+// handleResetStreamFrame delivers a RESET_STREAM (error code and final size)
+// to the receive side of the identified stream.
+func (c *Conn) handleResetStreamFrame(now time.Time, space numberSpace, payload []byte) int {
+ id, code, finalSize, n := consumeResetStreamFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ if s := c.streamForFrame(now, id, recvStream); s != nil {
+ if err := s.handleReset(code, finalSize); err != nil {
+ c.abort(now, err)
+ }
+ }
+ return n
+}
+
+// handleStopSendingFrame delivers a STOP_SENDING (with its error code) to
+// the send side of the identified stream.
+func (c *Conn) handleStopSendingFrame(now time.Time, space numberSpace, payload []byte) int {
+ id, code, n := consumeStopSendingFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ if s := c.streamForFrame(now, id, sendStream); s != nil {
+ if err := s.handleStopSending(code); err != nil {
+ c.abort(now, err)
+ }
+ }
+ return n
+}
+
+// handleCryptoFrame delivers CRYPTO frame data at the given offset to the
+// connection's crypto handling for this number space; any error aborts the
+// connection.
+func (c *Conn) handleCryptoFrame(now time.Time, space numberSpace, payload []byte) int {
+ off, data, n := consumeCryptoFrame(payload)
+ err := c.handleCrypto(now, space, off, data)
+ if err != nil {
+ c.abort(now, err)
+ return -1
+ }
+ return n
+}
+
+// handleStreamFrame delivers STREAM frame data (and any FIN) to the receive
+// side of the identified stream.
+func (c *Conn) handleStreamFrame(now time.Time, space numberSpace, payload []byte) int {
+ id, off, fin, b, n := consumeStreamFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ if s := c.streamForFrame(now, id, recvStream); s != nil {
+ if err := s.handleData(off, b, fin); err != nil {
+ c.abort(now, err)
+ }
+ }
+ return n
+}
+
+// handleNewConnectionIDFrame passes a peer-issued connection ID, its
+// retire-prior-to sequence, and its stateless reset token to the connection
+// ID state.
+func (c *Conn) handleNewConnectionIDFrame(now time.Time, space numberSpace, payload []byte) int {
+ seq, retire, connID, resetToken, n := consumeNewConnectionIDFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ if err := c.connIDState.handleNewConnID(c, seq, retire, connID, resetToken); err != nil {
+ c.abort(now, err)
+ }
+ return n
+}
+
+// handleRetireConnectionIDFrame processes the peer's retirement of one of
+// our connection IDs.
+func (c *Conn) handleRetireConnectionIDFrame(now time.Time, space numberSpace, payload []byte) int {
+ seq, n := consumeRetireConnectionIDFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ if err := c.connIDState.handleRetireConnID(c, seq); err != nil {
+ c.abort(now, err)
+ }
+ return n
+}
+
+// handlePathChallengeFrame parses a PATH_CHALLENGE frame and forwards its
+// data to the connection's path validation handling.
+func (c *Conn) handlePathChallengeFrame(now time.Time, dgram *datagram, space numberSpace, payload []byte) int {
+ data, n := consumePathChallengeFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ c.handlePathChallenge(now, dgram, data)
+ return n
+}
+
+// handlePathResponseFrame parses a PATH_RESPONSE frame and forwards its data
+// to the connection's path validation handling.
+func (c *Conn) handlePathResponseFrame(now time.Time, space numberSpace, payload []byte) int {
+ data, n := consumePathResponseFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ c.handlePathResponse(now, data)
+ return n
+}
+
+// handleConnectionCloseTransportFrame processes a transport-level
+// CONNECTION_CLOSE, recording the peer's error code and reason.
+func (c *Conn) handleConnectionCloseTransportFrame(now time.Time, payload []byte) int {
+ code, _, reason, n := consumeConnectionCloseTransportFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ c.handlePeerConnectionClose(now, peerTransportError{code: code, reason: reason})
+ return n
+}
+
+// handleConnectionCloseApplicationFrame processes an application-level
+// CONNECTION_CLOSE, recording the peer's error as an ApplicationError.
+func (c *Conn) handleConnectionCloseApplicationFrame(now time.Time, payload []byte) int {
+ code, reason, n := consumeConnectionCloseApplicationFrame(payload)
+ if n < 0 {
+ return -1
+ }
+ c.handlePeerConnectionClose(now, &ApplicationError{Code: code, Reason: reason})
+ return n
+}
+
+// handleHandshakeDoneFrame confirms the handshake on the client. Servers
+// treat a received HANDSHAKE_DONE as a protocol violation, since only
+// servers send this frame. The frame is a single type byte, so on success
+// exactly one byte is consumed.
+func (c *Conn) handleHandshakeDoneFrame(now time.Time, space numberSpace, payload []byte) int {
+ if c.side == serverSide {
+ // Clients should never send HANDSHAKE_DONE.
+ // https://www.rfc-editor.org/rfc/rfc9000#section-19.20-4
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "client sent HANDSHAKE_DONE",
+ })
+ return -1
+ }
+ if c.isAlive() {
+ c.confirmHandshake(now)
+ }
+ return 1
+}
+
+// errStatelessReset is the terminal error for a connection ended by a
+// stateless reset from the peer.
+var errStatelessReset = errors.New("received stateless reset")
+
+// handleStatelessReset reports whether resetToken is a valid stateless reset
+// token for one of the peer's connection IDs. If so, the connection enters
+// the draining state with errStatelessReset as its final error.
+func (c *Conn) handleStatelessReset(now time.Time, resetToken statelessResetToken) (valid bool) {
+ if !c.connIDState.isValidStatelessResetToken(resetToken) {
+ return false
+ }
+ c.setFinalError(errStatelessReset)
+ c.enterDraining(now)
+ return true
+}
diff --git a/quic/conn_send.go b/quic/conn_send.go
new file mode 100644
index 000000000..a87cac232
--- /dev/null
+++ b/quic/conn_send.go
@@ -0,0 +1,405 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "crypto/tls"
+ "errors"
+ "time"
+)
+
+// maybeSend sends datagrams, if possible.
+//
+// If sending is blocked by pacing, it returns the next time
+// a datagram may be sent.
+//
+// If sending is blocked indefinitely, it returns the zero Time.
+func (c *Conn) maybeSend(now time.Time) (next time.Time) {
+ // Assumption: The congestion window is not underutilized.
+ // If congestion control, pacing, and anti-amplification all permit sending,
+ // but we have no packet to send, then we will declare the window underutilized.
+ underutilized := false
+ defer func() {
+ c.loss.cc.setUnderutilized(c.log, underutilized)
+ }()
+
+ // Send one datagram on each iteration of this loop,
+ // until we hit a limit or run out of data to send.
+ //
+ // For each number space where we have write keys,
+ // attempt to construct a packet in that space.
+ // If the packet contains no frames (we have no data in need of sending),
+ // abandon the packet.
+ //
+ // Speculatively constructing packets means we don't need
+ // separate code paths for "do we have data to send?" and
+ // "send the data" that need to be kept in sync.
+ for {
+ limit, next := c.loss.sendLimit(now)
+ if limit == ccBlocked {
+ // If anti-amplification blocks sending, then no packet can be sent.
+ return next
+ }
+ if !c.sendOK(now) {
+ return time.Time{}
+ }
+ // We may still send ACKs, even if congestion control or pacing limit sending.
+
+ // Prepare to write a datagram of at most maxSendSize bytes.
+ c.w.reset(c.loss.maxSendSize())
+
+ dstConnID, ok := c.connIDState.dstConnID()
+ if !ok {
+ // It is currently not possible for us to end up without a connection ID,
+ // but handle the case anyway.
+ return time.Time{}
+ }
+
+ // Initial packet.
+ pad := false
+ var sentInitial *sentPacket
+ if c.keysInitial.canWrite() {
+ pnumMaxAcked := c.loss.spaces[initialSpace].maxAcked
+ pnum := c.loss.nextNumber(initialSpace)
+ p := longPacket{
+ ptype: packetTypeInitial,
+ version: quicVersion1,
+ num: pnum,
+ dstConnID: dstConnID,
+ srcConnID: c.connIDState.srcConnID(),
+ extra: c.retryToken,
+ }
+ c.w.startProtectedLongHeaderPacket(pnumMaxAcked, p)
+ c.appendFrames(now, initialSpace, pnum, limit)
+ if logPackets {
+ logSentPacket(c, packetTypeInitial, pnum, p.srcConnID, p.dstConnID, c.w.payload())
+ }
+ if c.logEnabled(QLogLevelPacket) && len(c.w.payload()) > 0 {
+ c.logPacketSent(packetTypeInitial, pnum, p.srcConnID, p.dstConnID, c.w.packetLen(), c.w.payload())
+ }
+ sentInitial = c.w.finishProtectedLongHeaderPacket(pnumMaxAcked, c.keysInitial.w, p)
+ if sentInitial != nil {
+ // Client initial packets and ack-eliciting server initial packets
+ // need to be sent in a datagram padded to at least 1200 bytes.
+ // We can't add the padding yet, however, since we may want to
+ // coalesce additional packets with this one.
+ if c.side == clientSide || sentInitial.ackEliciting {
+ pad = true
+ }
+ }
+ }
+
+ // Handshake packet.
+ if c.keysHandshake.canWrite() {
+ pnumMaxAcked := c.loss.spaces[handshakeSpace].maxAcked
+ pnum := c.loss.nextNumber(handshakeSpace)
+ p := longPacket{
+ ptype: packetTypeHandshake,
+ version: quicVersion1,
+ num: pnum,
+ dstConnID: dstConnID,
+ srcConnID: c.connIDState.srcConnID(),
+ }
+ c.w.startProtectedLongHeaderPacket(pnumMaxAcked, p)
+ c.appendFrames(now, handshakeSpace, pnum, limit)
+ if logPackets {
+ logSentPacket(c, packetTypeHandshake, pnum, p.srcConnID, p.dstConnID, c.w.payload())
+ }
+ if c.logEnabled(QLogLevelPacket) && len(c.w.payload()) > 0 {
+ c.logPacketSent(packetTypeHandshake, pnum, p.srcConnID, p.dstConnID, c.w.packetLen(), c.w.payload())
+ }
+ if sent := c.w.finishProtectedLongHeaderPacket(pnumMaxAcked, c.keysHandshake.w, p); sent != nil {
+ c.packetSent(now, handshakeSpace, sent)
+ if c.side == clientSide {
+ // "[...] a client MUST discard Initial keys when it first
+ // sends a Handshake packet [...]"
+ // https://www.rfc-editor.org/rfc/rfc9001.html#section-4.9.1-2
+ c.discardKeys(now, initialSpace)
+ }
+ }
+ }
+
+ // 1-RTT packet.
+ if c.keysAppData.canWrite() {
+ pnumMaxAcked := c.loss.spaces[appDataSpace].maxAcked
+ pnum := c.loss.nextNumber(appDataSpace)
+ c.w.start1RTTPacket(pnum, pnumMaxAcked, dstConnID)
+ c.appendFrames(now, appDataSpace, pnum, limit)
+ if pad && len(c.w.payload()) > 0 {
+ // 1-RTT packets have no length field and extend to the end
+ // of the datagram, so if we're sending a datagram that needs
+ // padding we need to add it inside the 1-RTT packet.
+ c.w.appendPaddingTo(paddedInitialDatagramSize)
+ pad = false
+ }
+ if logPackets {
+ logSentPacket(c, packetType1RTT, pnum, nil, dstConnID, c.w.payload())
+ }
+ if c.logEnabled(QLogLevelPacket) && len(c.w.payload()) > 0 {
+ c.logPacketSent(packetType1RTT, pnum, nil, dstConnID, c.w.packetLen(), c.w.payload())
+ }
+ if sent := c.w.finish1RTTPacket(pnum, pnumMaxAcked, dstConnID, &c.keysAppData); sent != nil {
+ c.packetSent(now, appDataSpace, sent)
+ }
+ }
+
+ buf := c.w.datagram()
+ if len(buf) == 0 {
+ if limit == ccOK {
+ // We have nothing to send, and congestion control does not
+ // block sending. The congestion window is underutilized.
+ underutilized = true
+ }
+ return next
+ }
+
+ if sentInitial != nil {
+ if pad {
+ // Pad out the datagram with zeros, coalescing the Initial
+ // packet with invalid packets that will be ignored by the peer.
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-14.1-1
+ for len(buf) < paddedInitialDatagramSize {
+ buf = append(buf, 0)
+ // Technically this padding isn't in any packet, but
+ // account it to the Initial packet in this datagram
+ // for purposes of flow control and loss recovery.
+ sentInitial.size++
+ sentInitial.inFlight = true
+ }
+ }
+ // If we're a client and this Initial packet is coalesced
+ // with a Handshake packet, then we've discarded Initial keys
+ // since constructing the packet and shouldn't record it as in-flight.
+ if c.keysInitial.canWrite() {
+ c.packetSent(now, initialSpace, sentInitial)
+ }
+ }
+
+ c.endpoint.sendDatagram(datagram{
+ b: buf,
+ peerAddr: c.peerAddr,
+ })
+ }
+}
+
+func (c *Conn) packetSent(now time.Time, space numberSpace, sent *sentPacket) {
+ c.idleHandlePacketSent(now, sent)
+ c.loss.packetSent(now, c.log, space, sent)
+}
+
+func (c *Conn) appendFrames(now time.Time, space numberSpace, pnum packetNumber, limit ccLimit) {
+ if c.lifetime.localErr != nil {
+ c.appendConnectionCloseFrame(now, space, c.lifetime.localErr)
+ return
+ }
+
+ shouldSendAck := c.acks[space].shouldSendAck(now)
+ if limit != ccOK {
+ // ACKs are not limited by congestion control.
+ if shouldSendAck && c.appendAckFrame(now, space) {
+ c.acks[space].sentAck()
+ }
+ return
+ }
+ // We want to send an ACK frame if the ack controller wants to send a frame now,
+ // OR if we are sending a packet anyway and have ack-eliciting packets which we
+ // have not yet acked.
+ //
+ // We speculatively add ACK frames here, to put them at the front of the packet
+ // to avoid truncation.
+ //
+ // After adding all frames, if we don't need to send an ACK frame and have not
+ // added any other frames, we abandon the packet.
+ if c.appendAckFrame(now, space) {
+ defer func() {
+ // All frames other than ACK and PADDING are ack-eliciting,
+ // so if the packet is ack-eliciting we've added additional
+ // frames to it.
+ if !shouldSendAck && !c.w.sent.ackEliciting {
+ // There's nothing in this packet but ACK frames, and
+ // we don't want to send an ACK-only packet at this time.
+ // Abandoning the packet means we wrote an ACK frame for
+ // nothing, but constructing the frame is cheap.
+ c.w.abandonPacket()
+ return
+ }
+ // Either we are willing to send an ACK-only packet,
+ // or we've added additional frames.
+ c.acks[space].sentAck()
+ if !c.w.sent.ackEliciting && c.shouldMakePacketAckEliciting() {
+ c.w.appendPingFrame()
+ }
+ }()
+ }
+ if limit != ccOK {
+ return
+ }
+ pto := c.loss.ptoExpired
+
+ // TODO: Add all the other frames we can send.
+
+ // CRYPTO
+ c.crypto[space].dataToSend(pto, func(off, size int64) int64 {
+ b, _ := c.w.appendCryptoFrame(off, int(size))
+ c.crypto[space].sendData(off, b)
+ return int64(len(b))
+ })
+
+ // Test-only PING frames.
+ if space == c.testSendPingSpace && c.testSendPing.shouldSendPTO(pto) {
+ if !c.w.appendPingFrame() {
+ return
+ }
+ c.testSendPing.setSent(pnum)
+ }
+
+ if space == appDataSpace {
+ // HANDSHAKE_DONE
+ if c.handshakeConfirmed.shouldSendPTO(pto) {
+ if !c.w.appendHandshakeDoneFrame() {
+ return
+ }
+ c.handshakeConfirmed.setSent(pnum)
+ }
+
+ // NEW_CONNECTION_ID, RETIRE_CONNECTION_ID
+ if !c.connIDState.appendFrames(c, pnum, pto) {
+ return
+ }
+
+ // PATH_RESPONSE
+ if pad, ok := c.appendPathFrames(); !ok {
+ return
+ } else if pad {
+ defer c.w.appendPaddingTo(smallestMaxDatagramSize)
+ }
+
+ // All stream-related frames. This should come last in the packet,
+ // so large amounts of STREAM data don't crowd out other frames
+ // we may need to send.
+ if !c.appendStreamFrames(&c.w, pnum, pto) {
+ return
+ }
+
+ if !c.appendKeepAlive(now) {
+ return
+ }
+ }
+
+ // If this is a PTO probe and we haven't added an ack-eliciting frame yet,
+ // add a PING to make this an ack-eliciting probe.
+ //
+ // Technically, there are separate PTO timers for each number space.
+ // When a PTO timer expires, we MUST send an ack-eliciting packet in the
+ // timer's space. We SHOULD send ack-eliciting packets in every other space
+ // with in-flight data. (RFC 9002, section 6.2.4)
+ //
+ // What we actually do is send a single datagram containing an ack-eliciting packet
+ // for every space for which we have keys.
+ //
+ // We fill the PTO probe packets with new or unacknowledged data. For example,
+ // a PTO probe sent for the Initial space will generally retransmit previously
+ // sent but unacknowledged CRYPTO data.
+ //
+ // When sending a PTO probe datagram containing multiple packets, it is
+ // possible that an earlier packet will fill up the datagram, leaving no
+ // space for the remaining probe packet(s). This is not a problem in practice.
+ //
+ // A client discards Initial keys when it first sends a Handshake packet
+ // (RFC 9001 Section 4.9.1). Handshake keys are discarded when the handshake
+ // is confirmed (RFC 9001 Section 4.9.2). The PTO timer is not set for the
+ // Application Data packet number space until the handshake is confirmed
+ // (RFC 9002 Section 6.2.1). Therefore, the only times a PTO probe can fire
+ // while data for multiple spaces is in flight are:
+ //
+ // - a server's Initial or Handshake timers can fire while Initial and Handshake
+ // data is in flight; and
+ //
+ // - a client's Handshake timer can fire while Handshake and Application Data
+ // data is in flight.
+ //
+ // It is theoretically possible for a server's Initial CRYPTO data to overflow
+ // the maximum datagram size, but unlikely in practice; this space contains
+ // only the ServerHello TLS message, which is small. It's also unlikely that
+ // the Handshake PTO probe will fire while Initial data is in flight (this
+ // requires not just that the Initial CRYPTO data completely fill a datagram,
+ // but a quite specific arrangement of lost and retransmitted packets.)
+ // We don't bother worrying about this case here, since the worst case is
+ // that we send a PTO probe for the in-flight Initial data and drop the
+ // Handshake probe.
+ //
+ // If a client's Handshake PTO timer fires while Application Data data is in
+ // flight, it is possible that the resent Handshake CRYPTO data will crowd
+ // out the probe for the Application Data space. However, since this probe is
+ // optional (recall that the Application Data PTO timer is never set until
+ // after Handshake keys have been discarded), dropping it is acceptable.
+ if pto && !c.w.sent.ackEliciting {
+ c.w.appendPingFrame()
+ }
+}
+
+// shouldMakePacketAckEliciting is called when sending a packet containing nothing but an ACK frame.
+// It reports whether we should add a PING frame to the packet to make it ack-eliciting.
+func (c *Conn) shouldMakePacketAckEliciting() bool {
+ if c.keysAppData.needAckEliciting() {
+ // The peer has initiated a key update.
+ // We haven't sent them any packets yet in the new phase.
+ // Make this an ack-eliciting packet.
+ // Their ack of this packet will complete the key update.
+ return true
+ }
+ if c.loss.consecutiveNonAckElicitingPackets >= 19 {
+ // We've sent a run of non-ack-eliciting packets.
+ // Add in an ack-eliciting one every once in a while so the peer
+ // lets us know which ones have arrived.
+ //
+ // Google QUICHE injects a PING after sending 19 packets. We do the same.
+ //
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.2.4-2
+ return true
+ }
+ // TODO: Consider making every packet sent when in PTO ack-eliciting to speed up recovery.
+ return false
+}
+
+func (c *Conn) appendAckFrame(now time.Time, space numberSpace) bool {
+ seen, delay := c.acks[space].acksToSend(now)
+ if len(seen) == 0 {
+ return false
+ }
+ d := unscaledAckDelayFromDuration(delay, ackDelayExponent)
+ return c.w.appendAckFrame(seen, d)
+}
+
+func (c *Conn) appendConnectionCloseFrame(now time.Time, space numberSpace, err error) {
+ c.sentConnectionClose(now)
+ switch e := err.(type) {
+ case localTransportError:
+ c.w.appendConnectionCloseTransportFrame(e.code, 0, e.reason)
+ case *ApplicationError:
+ if space != appDataSpace {
+ // "CONNECTION_CLOSE frames signaling application errors (type 0x1d)
+ // MUST only appear in the application data packet number space."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-12.5-2.2
+ c.w.appendConnectionCloseTransportFrame(errApplicationError, 0, "")
+ } else {
+ c.w.appendConnectionCloseApplicationFrame(e.Code, e.Reason)
+ }
+ default:
+ // TLS alerts are sent using error codes [0x0100,0x01ff).
+ // https://www.rfc-editor.org/rfc/rfc9000#section-20.1-2.36.1
+ var alert tls.AlertError
+ switch {
+ case errors.As(err, &alert):
+ // tls.AlertError is a uint8, so this can't exceed 0x01ff.
+ code := errTLSBase + transportError(alert)
+ c.w.appendConnectionCloseTransportFrame(code, 0, "")
+ default:
+ c.w.appendConnectionCloseTransportFrame(errInternal, 0, "")
+ }
+ }
+}
diff --git a/quic/conn_send_test.go b/quic/conn_send_test.go
new file mode 100644
index 000000000..2205ff2f7
--- /dev/null
+++ b/quic/conn_send_test.go
@@ -0,0 +1,83 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "testing"
+ "time"
+)
+
+func TestAckElicitingAck(t *testing.T) {
+ // "A receiver that sends only non-ack-eliciting packets [...] might not receive
+ // an acknowledgment for a long period of time.
+ // [...] a receiver could send a [...] ack-eliciting frame occasionally [...]
+ // to elicit an ACK from the peer."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-13.2.4-2
+ //
+ // Send a bunch of ack-eliciting packets, verify that the conn doesn't just
+ // send ACKs in response.
+ tc := newTestConn(t, clientSide, permissiveTransportParameters)
+ tc.handshake()
+ const count = 100
+ for i := 0; i < count; i++ {
+ tc.advance(1 * time.Millisecond)
+ tc.writeFrames(packetType1RTT,
+ debugFramePing{},
+ )
+ got, _ := tc.readFrame()
+ switch got.(type) {
+ case debugFrameAck:
+ continue
+ case debugFramePing:
+ return
+ }
+ }
+ t.Errorf("after sending %v PINGs, got no ack-eliciting response", count)
+}
+
+func TestSendPacketNumberSize(t *testing.T) {
+ tc := newTestConn(t, clientSide, permissiveTransportParameters)
+ tc.handshake()
+
+ recvPing := func() *testPacket {
+ t.Helper()
+ tc.conn.ping(appDataSpace)
+ p := tc.readPacket()
+ if p == nil {
+ t.Fatalf("want packet containing PING, got none")
+ }
+ return p
+ }
+
+ // Desynchronize the packet numbers the conn is sending and the ones it is receiving,
+ // by having the conn send a number of unacked packets.
+ for i := 0; i < 16; i++ {
+ recvPing()
+ }
+
+ // Establish the maximum packet number the conn has received an ACK for.
+ maxAcked := recvPing().num
+ tc.writeAckForAll()
+
+ // Make the conn send a sequence of packets.
+ // Check that the packet number is encoded with two bytes once the difference between the
+ // current packet and the max acked one is sufficiently large.
+ for want := maxAcked + 1; want < maxAcked+0x100; want++ {
+ p := recvPing()
+ if p.num != want {
+ t.Fatalf("received packet number %v, want %v", p.num, want)
+ }
+ gotPnumLen := int(p.header&0x03) + 1
+ wantPnumLen := 1
+ if p.num-maxAcked >= 0x80 {
+ wantPnumLen = 2
+ }
+ if gotPnumLen != wantPnumLen {
+ t.Fatalf("packet number 0x%x encoded with %v bytes, want %v (max acked = %v)", p.num, gotPnumLen, wantPnumLen, maxAcked)
+ }
+ }
+}
diff --git a/quic/conn_streams.go b/quic/conn_streams.go
new file mode 100644
index 000000000..87cfd297e
--- /dev/null
+++ b/quic/conn_streams.go
@@ -0,0 +1,471 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type streamsState struct {
+ queue queue[*Stream] // new, peer-created streams
+
+ // All peer-created streams.
+ //
+ // Implicitly created streams are included as an empty entry in the map.
+ // (For example, if we receive a frame for stream 4, we implicitly create stream 0 and
+ // insert an empty entry for it to the map.)
+ //
+ // The map value is maybeStream rather than *Stream as a reminder that values can be nil.
+ streams map[streamID]maybeStream
+
+ // Limits on the number of streams, indexed by streamType.
+ localLimit [streamTypeCount]localStreamLimits
+ remoteLimit [streamTypeCount]remoteStreamLimits
+
+ // Peer configuration provided in transport parameters.
+ peerInitialMaxStreamDataRemote [streamTypeCount]int64 // streams opened by us
+ peerInitialMaxStreamDataBidiLocal int64 // streams opened by them
+
+ // Connection-level flow control.
+ inflow connInflow
+ outflow connOutflow
+
+ // Streams with frames to send are stored in one of two circular linked lists,
+ // depending on whether they require connection-level flow control.
+ needSend atomic.Bool
+ sendMu sync.Mutex
+ queueMeta streamRing // streams with any non-flow-controlled frames
+ queueData streamRing // streams with only flow-controlled frames
+}
+
+// maybeStream is a possibly nil *Stream. See streamsState.streams.
+type maybeStream struct {
+ s *Stream
+}
+
+func (c *Conn) streamsInit() {
+ c.streams.streams = make(map[streamID]maybeStream)
+ c.streams.queue = newQueue[*Stream]()
+ c.streams.localLimit[bidiStream].init()
+ c.streams.localLimit[uniStream].init()
+ c.streams.remoteLimit[bidiStream].init(c.config.maxBidiRemoteStreams())
+ c.streams.remoteLimit[uniStream].init(c.config.maxUniRemoteStreams())
+ c.inflowInit()
+}
+
+func (c *Conn) streamsCleanup() {
+ c.streams.queue.close(errConnClosed)
+ c.streams.localLimit[bidiStream].connHasClosed()
+ c.streams.localLimit[uniStream].connHasClosed()
+ for _, s := range c.streams.streams {
+ if s.s != nil {
+ s.s.connHasClosed()
+ }
+ }
+}
+
+// AcceptStream waits for and returns the next stream created by the peer.
+func (c *Conn) AcceptStream(ctx context.Context) (*Stream, error) {
+ return c.streams.queue.get(ctx, c.testHooks)
+}
+
+// NewStream creates a stream.
+//
+// If the peer's maximum stream limit for the connection has been reached,
+// NewStream blocks until the limit is increased or the context expires.
+func (c *Conn) NewStream(ctx context.Context) (*Stream, error) {
+ return c.newLocalStream(ctx, bidiStream)
+}
+
+// NewSendOnlyStream creates a unidirectional, send-only stream.
+//
+// If the peer's maximum stream limit for the connection has been reached,
+// NewSendOnlyStream blocks until the limit is increased or the context expires.
+func (c *Conn) NewSendOnlyStream(ctx context.Context) (*Stream, error) {
+ return c.newLocalStream(ctx, uniStream)
+}
+
+func (c *Conn) newLocalStream(ctx context.Context, styp streamType) (*Stream, error) {
+ num, err := c.streams.localLimit[styp].open(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+
+ s := newStream(c, newStreamID(c.side, styp, num))
+ s.outmaxbuf = c.config.maxStreamWriteBufferSize()
+ s.outwin = c.streams.peerInitialMaxStreamDataRemote[styp]
+ if styp == bidiStream {
+ s.inmaxbuf = c.config.maxStreamReadBufferSize()
+ s.inwin = c.config.maxStreamReadBufferSize()
+ }
+ s.inUnlock()
+ s.outUnlock()
+
+ // Modify c.streams on the conn's loop.
+ if err := c.runOnLoop(ctx, func(now time.Time, c *Conn) {
+ c.streams.streams[s.id] = maybeStream{s}
+ }); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// streamFrameType identifies which direction of a stream,
+// from the local perspective, a frame is associated with.
+//
+// For example, STREAM is a recvStream frame,
+// because it carries data from the peer to us.
+type streamFrameType uint8
+
+const (
+ sendStream = streamFrameType(iota) // for example, MAX_STREAM_DATA
+ recvStream // for example, STREAM_DATA_BLOCKED
+)
+
+// streamForID returns the stream with the given id.
+// If the stream does not exist, it returns nil.
+func (c *Conn) streamForID(id streamID) *Stream {
+ return c.streams.streams[id].s
+}
+
+// streamForFrame returns the stream with the given id.
+// If the stream does not exist, it may be created.
+//
+// streamForFrame aborts the connection if the stream id, state, and frame type don't align.
+// For example, it aborts the connection with a STREAM_STATE error if a MAX_DATA frame
+// is received for a receive-only stream, or if the peer attempts to create a stream that
+// should be originated locally.
+//
+// streamForFrame returns nil if the stream no longer exists or if an error occurred.
+func (c *Conn) streamForFrame(now time.Time, id streamID, ftype streamFrameType) *Stream {
+ if id.streamType() == uniStream {
+ if (id.initiator() == c.side) != (ftype == sendStream) {
+ // Received an invalid frame for unidirectional stream.
+ // For example, a RESET_STREAM frame for a send-only stream.
+ c.abort(now, localTransportError{
+ code: errStreamState,
+ reason: "invalid frame for unidirectional stream",
+ })
+ return nil
+ }
+ }
+
+ ms, isOpen := c.streams.streams[id]
+ if ms.s != nil {
+ return ms.s
+ }
+
+ num := id.num()
+ styp := id.streamType()
+ if id.initiator() == c.side {
+ if num < c.streams.localLimit[styp].opened {
+ // This stream was created by us, and has been closed.
+ return nil
+ }
+ // Received a frame for a stream that should be originated by us,
+ // but which we never created.
+ c.abort(now, localTransportError{
+ code: errStreamState,
+ reason: "received frame for unknown stream",
+ })
+ return nil
+ } else {
+ // if isOpen, this is a stream that was implicitly opened by a
+ // previous frame for a larger-numbered stream, but we haven't
+ // actually created it yet.
+ if !isOpen && num < c.streams.remoteLimit[styp].opened {
+ // This stream was created by the peer, and has been closed.
+ return nil
+ }
+ }
+
+ prevOpened := c.streams.remoteLimit[styp].opened
+ if err := c.streams.remoteLimit[styp].open(id); err != nil {
+ c.abort(now, err)
+ return nil
+ }
+
+ // Receiving a frame for a stream implicitly creates all streams
+ // with the same initiator and type and a lower number.
+ // Add a nil entry to the streams map for each implicitly created stream.
+ for n := newStreamID(id.initiator(), id.streamType(), prevOpened); n < id; n += 4 {
+ c.streams.streams[n] = maybeStream{}
+ }
+
+ s := newStream(c, id)
+ s.inmaxbuf = c.config.maxStreamReadBufferSize()
+ s.inwin = c.config.maxStreamReadBufferSize()
+ if id.streamType() == bidiStream {
+ s.outmaxbuf = c.config.maxStreamWriteBufferSize()
+ s.outwin = c.streams.peerInitialMaxStreamDataBidiLocal
+ }
+ s.inUnlock()
+ s.outUnlock()
+
+ c.streams.streams[id] = maybeStream{s}
+ c.streams.queue.put(s)
+ return s
+}
+
+// maybeQueueStreamForSend marks a stream as containing frames that need sending.
+func (c *Conn) maybeQueueStreamForSend(s *Stream, state streamState) {
+ if state.wantQueue() == state.inQueue() {
+ return // already on the right queue
+ }
+ c.streams.sendMu.Lock()
+ defer c.streams.sendMu.Unlock()
+ state = s.state.load() // may have changed while waiting
+ c.queueStreamForSendLocked(s, state)
+
+ c.streams.needSend.Store(true)
+ c.wake()
+}
+
+// queueStreamForSendLocked moves a stream to the correct send queue,
+// or removes it from all queues.
+//
+// state is the last known stream state.
+func (c *Conn) queueStreamForSendLocked(s *Stream, state streamState) {
+ for {
+ wantQueue := state.wantQueue()
+ inQueue := state.inQueue()
+ if inQueue == wantQueue {
+ return // already on the right queue
+ }
+
+ switch inQueue {
+ case metaQueue:
+ c.streams.queueMeta.remove(s)
+ case dataQueue:
+ c.streams.queueData.remove(s)
+ }
+
+ switch wantQueue {
+ case metaQueue:
+ c.streams.queueMeta.append(s)
+ state = s.state.set(streamQueueMeta, streamQueueMeta|streamQueueData)
+ case dataQueue:
+ c.streams.queueData.append(s)
+ state = s.state.set(streamQueueData, streamQueueMeta|streamQueueData)
+ case noQueue:
+ state = s.state.set(0, streamQueueMeta|streamQueueData)
+ }
+
+ // If the stream state changed while we were moving the stream,
+ // we might now be on the wrong queue.
+ //
+ // For example:
+ // - stream has data to send: streamOutSendData|streamQueueData
+ // - appendStreamFrames sends all the data: streamQueueData
+ // - concurrently, more data is written: streamOutSendData|streamQueueData
+ // - appendStreamFrames calls us with the last state it observed
+ // (streamQueueData).
+ // - We remove the stream from the queue and observe the updated state:
+ // streamOutSendData
+ // - We realize that the stream needs to go back on the data queue.
+ //
+ // Go back around the loop to confirm we're on the correct queue.
+ }
+}
+
+// appendStreamFrames writes stream-related frames to the current packet.
+//
+// It returns true if no more frames need appending,
+// false if not everything fit in the current packet.
+func (c *Conn) appendStreamFrames(w *packetWriter, pnum packetNumber, pto bool) bool {
+ // MAX_DATA
+ if !c.appendMaxDataFrame(w, pnum, pto) {
+ return false
+ }
+
+ // MAX_STREAM_DATA
+ if !c.streams.remoteLimit[uniStream].appendFrame(w, uniStream, pnum, pto) {
+ return false
+ }
+ if !c.streams.remoteLimit[bidiStream].appendFrame(w, bidiStream, pnum, pto) {
+ return false
+ }
+
+ if pto {
+ return c.appendStreamFramesPTO(w, pnum)
+ }
+ if !c.streams.needSend.Load() {
+ return true
+ }
+ c.streams.sendMu.Lock()
+ defer c.streams.sendMu.Unlock()
+ // queueMeta contains streams with non-flow-controlled frames to send.
+ for c.streams.queueMeta.head != nil {
+ s := c.streams.queueMeta.head
+ state := s.state.load()
+ if state&(streamQueueMeta|streamConnRemoved) != streamQueueMeta {
+ panic("BUG: queueMeta stream is not streamQueueMeta")
+ }
+ if state&streamInSendMeta != 0 {
+ s.ingate.lock()
+ ok := s.appendInFramesLocked(w, pnum, pto)
+ state = s.inUnlockNoQueue()
+ if !ok {
+ return false
+ }
+ if state&streamInSendMeta != 0 {
+ panic("BUG: streamInSendMeta set after successfully appending frames")
+ }
+ }
+ if state&streamOutSendMeta != 0 {
+ s.outgate.lock()
+ // This might also append flow-controlled frames if we have any
+ // and available conn-level quota. That's fine.
+ ok := s.appendOutFramesLocked(w, pnum, pto)
+ state = s.outUnlockNoQueue()
+ // We're checking both ok and state, because appendOutFramesLocked
+ // might have filled up the packet with flow-controlled data.
+ // If so, we want to move the stream to queueData for any remaining frames.
+ if !ok && state&streamOutSendMeta != 0 {
+ return false
+ }
+ if state&streamOutSendMeta != 0 {
+ panic("BUG: streamOutSendMeta set after successfully appending frames")
+ }
+ }
+ // We've sent all frames for this stream, so remove it from the send queue.
+ c.streams.queueMeta.remove(s)
+ if state&(streamInDone|streamOutDone) == streamInDone|streamOutDone {
+ // Stream is finished, remove it from the conn.
+ state = s.state.set(streamConnRemoved, streamQueueMeta|streamConnRemoved)
+ delete(c.streams.streams, s.id)
+
+ // Record finalization of remote streams, to know when
+ // to extend the peer's stream limit.
+ if s.id.initiator() != c.side {
+ c.streams.remoteLimit[s.id.streamType()].close()
+ }
+ } else {
+ state = s.state.set(0, streamQueueMeta|streamConnRemoved)
+ }
+ // The stream may have flow-controlled data to send,
+ // or something might have added non-flow-controlled frames after we
+ // unlocked the stream.
+ // If so, put the stream back on a queue.
+ c.queueStreamForSendLocked(s, state)
+ }
+ // queueData contains streams with flow-controlled frames.
+ for c.streams.queueData.head != nil {
+ avail := c.streams.outflow.avail()
+ if avail == 0 {
+ break // no flow control quota available
+ }
+ s := c.streams.queueData.head
+ s.outgate.lock()
+ ok := s.appendOutFramesLocked(w, pnum, pto)
+ state := s.outUnlockNoQueue()
+ if !ok {
+ // We've sent some data for this stream, but it still has more to send.
+ // If the stream got a reasonable chance to put data in a packet,
+ // advance sendHead to the next stream in line, to avoid starvation.
+ // We'll come back to this stream after going through the others.
+ //
+ // If the packet was already mostly out of space, leave sendHead alone
+ // and come back to this stream again on the next packet.
+ if avail > 512 {
+ c.streams.queueData.head = s.next
+ }
+ return false
+ }
+ if state&streamQueueData == 0 {
+ panic("BUG: queueData stream is not streamQueueData")
+ }
+ if state&streamOutSendData != 0 {
+ // We must have run out of connection-level flow control:
+ // appendOutFramesLocked says it wrote all it can, but there's
+ // still data to send.
+ //
+ // Advance sendHead to the next stream in line to avoid starvation.
+ if c.streams.outflow.avail() != 0 {
+ panic("BUG: streamOutSendData set and flow control available after send")
+ }
+ c.streams.queueData.head = s.next
+ return true
+ }
+ c.streams.queueData.remove(s)
+ state = s.state.set(0, streamQueueData)
+ c.queueStreamForSendLocked(s, state)
+ }
+ if c.streams.queueMeta.head == nil && c.streams.queueData.head == nil {
+ c.streams.needSend.Store(false)
+ }
+ return true
+}
+
+// appendStreamFramesPTO writes stream-related frames to the current packet
+// for a PTO probe.
+//
+// It returns true if no more frames need appending,
+// false if not everything fit in the current packet.
+func (c *Conn) appendStreamFramesPTO(w *packetWriter, pnum packetNumber) bool {
+ c.streams.sendMu.Lock()
+ defer c.streams.sendMu.Unlock()
+ const pto = true
+ for _, ms := range c.streams.streams {
+ s := ms.s
+ if s == nil {
+ continue
+ }
+ const pto = true
+ s.ingate.lock()
+ inOK := s.appendInFramesLocked(w, pnum, pto)
+ s.inUnlockNoQueue()
+ if !inOK {
+ return false
+ }
+
+ s.outgate.lock()
+ outOK := s.appendOutFramesLocked(w, pnum, pto)
+ s.outUnlockNoQueue()
+ if !outOK {
+ return false
+ }
+ }
+ return true
+}
+
+// A streamRing is a circular linked list of streams.
+type streamRing struct {
+ head *Stream
+}
+
+// remove removes s from the ring.
+// s must be on the ring.
+func (r *streamRing) remove(s *Stream) {
+ if s.next == s {
+ r.head = nil // s was the last stream in the ring
+ } else {
+ s.prev.next = s.next
+ s.next.prev = s.prev
+ if r.head == s {
+ r.head = s.next
+ }
+ }
+}
+
+// append places s at the last position in the ring.
+// s must not be attached to any ring.
+func (r *streamRing) append(s *Stream) {
+ if r.head == nil {
+ r.head = s
+ s.next = s
+ s.prev = s
+ } else {
+ s.prev = r.head.prev
+ s.next = r.head
+ s.prev.next = s
+ s.next.prev = s
+ }
+}
diff --git a/quic/conn_streams_test.go b/quic/conn_streams_test.go
new file mode 100644
index 000000000..dc81ad991
--- /dev/null
+++ b/quic/conn_streams_test.go
@@ -0,0 +1,559 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "sync"
+ "testing"
+)
+
+func TestStreamsCreate(t *testing.T) {
+ ctx := canceledContext()
+ tc := newTestConn(t, clientSide, permissiveTransportParameters)
+ tc.handshake()
+
+ s, err := tc.conn.NewStream(ctx)
+ if err != nil {
+ t.Fatalf("NewStream: %v", err)
+ }
+ s.Flush() // open the stream
+ tc.wantFrame("created bidirectional stream 0",
+ packetType1RTT, debugFrameStream{
+ id: 0, // client-initiated, bidi, number 0
+ data: []byte{},
+ })
+
+ s, err = tc.conn.NewSendOnlyStream(ctx)
+ if err != nil {
+ t.Fatalf("NewStream: %v", err)
+ }
+ s.Flush() // open the stream
+ tc.wantFrame("created unidirectional stream 0",
+ packetType1RTT, debugFrameStream{
+ id: 2, // client-initiated, uni, number 0
+ data: []byte{},
+ })
+
+ s, err = tc.conn.NewStream(ctx)
+ if err != nil {
+ t.Fatalf("NewStream: %v", err)
+ }
+ s.Flush() // open the stream
+ tc.wantFrame("created bidirectional stream 1",
+ packetType1RTT, debugFrameStream{
+ id: 4, // client-initiated, bidi, number 1
+ data: []byte{},
+ })
+}
+
+func TestStreamsAccept(t *testing.T) {
+ ctx := canceledContext()
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameStream{
+ id: 0, // client-initiated, bidi, number 0
+ },
+ debugFrameStream{
+ id: 2, // client-initiated, uni, number 0
+ },
+ debugFrameStream{
+ id: 4, // client-initiated, bidi, number 1
+ })
+
+ for _, accept := range []struct {
+ id streamID
+ readOnly bool
+ }{
+ {0, false},
+ {2, true},
+ {4, false},
+ } {
+ s, err := tc.conn.AcceptStream(ctx)
+ if err != nil {
+ t.Fatalf("conn.AcceptStream() = %v, want stream %v", err, accept.id)
+ }
+ if got, want := s.id, accept.id; got != want {
+ t.Fatalf("conn.AcceptStream() = stream %v, want %v", got, want)
+ }
+ if got, want := s.IsReadOnly(), accept.readOnly; got != want {
+ t.Fatalf("stream %v: s.IsReadOnly() = %v, want %v", accept.id, got, want)
+ }
+ }
+
+ _, err := tc.conn.AcceptStream(ctx)
+ if err != context.Canceled {
+ t.Fatalf("conn.AcceptStream() = %v, want context.Canceled", err)
+ }
+}
+
+func TestStreamsBlockingAccept(t *testing.T) {
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+
+ a := runAsync(tc, func(ctx context.Context) (*Stream, error) {
+ return tc.conn.AcceptStream(ctx)
+ })
+ if _, err := a.result(); err != errNotDone {
+ tc.t.Fatalf("AcceptStream() = _, %v; want errNotDone", err)
+ }
+
+ sid := newStreamID(clientSide, bidiStream, 0)
+ tc.writeFrames(packetType1RTT,
+ debugFrameStream{
+ id: sid,
+ })
+
+ s, err := a.result()
+ if err != nil {
+ t.Fatalf("conn.AcceptStream() = _, %v, want stream", err)
+ }
+ if got, want := s.id, sid; got != want {
+ t.Fatalf("conn.AcceptStream() = stream %v, want %v", got, want)
+ }
+ if got, want := s.IsReadOnly(), false; got != want {
+ t.Fatalf("s.IsReadOnly() = %v, want %v", got, want)
+ }
+}
+
+func TestStreamsLocalStreamNotCreated(t *testing.T) {
+ // "An endpoint MUST terminate the connection with error STREAM_STATE_ERROR
+ // if it receives a STREAM frame for a locally initiated stream that has
+ // not yet been created [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-19.8-3
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameStream{
+ id: 1, // server-initiated, bidi, number 0
+ })
+ tc.wantFrame("peer sent STREAM frame for an uncreated local stream",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errStreamState,
+ })
+}
+
+func TestStreamsLocalStreamClosed(t *testing.T) {
+ tc, s := newTestConnAndLocalStream(t, clientSide, uniStream, permissiveTransportParameters)
+ s.CloseWrite()
+ tc.wantFrame("FIN for closed stream",
+ packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, uniStream, 0),
+ fin: true,
+ data: []byte{},
+ })
+ tc.writeAckForAll()
+
+ tc.writeFrames(packetType1RTT, debugFrameStopSending{
+ id: newStreamID(clientSide, uniStream, 0),
+ })
+ tc.wantIdle("frame for finalized stream is ignored")
+
+ // ACKing the last stream packet should have cleaned up the stream.
+ // Check that we don't have any state left.
+ if got := len(tc.conn.streams.streams); got != 0 {
+ t.Fatalf("after close, len(tc.conn.streams.streams) = %v, want 0", got)
+ }
+ if tc.conn.streams.queueMeta.head != nil {
+ t.Fatalf("after close, stream send queue is not empty; should be")
+ }
+}
+
+func TestStreamsStreamSendOnly(t *testing.T) {
+ // "An endpoint MUST terminate the connection with error STREAM_STATE_ERROR
+ // if it receives a STREAM frame for a locally initiated stream that has
+ // not yet been created, or for a send-only stream."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-19.8-3
+ ctx := canceledContext()
+ tc := newTestConn(t, serverSide, permissiveTransportParameters)
+ tc.handshake()
+
+ s, err := tc.conn.NewSendOnlyStream(ctx)
+ if err != nil {
+ t.Fatalf("NewStream: %v", err)
+ }
+ s.Flush() // open the stream
+ tc.wantFrame("created unidirectional stream 0",
+ packetType1RTT, debugFrameStream{
+ id: 3, // server-initiated, uni, number 0
+ data: []byte{},
+ })
+
+ tc.writeFrames(packetType1RTT,
+ debugFrameStream{
+ id: 3, // server-initiated, uni, number 0
+ })
+ tc.wantFrame("peer sent STREAM frame for a send-only stream",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errStreamState,
+ })
+}
+
+func TestStreamsWriteQueueFairness(t *testing.T) {
+ ctx := canceledContext()
+ const dataLen = 1 << 20
+ const numStreams = 3
+ tc := newTestConn(t, clientSide, func(p *transportParameters) {
+ p.initialMaxStreamsBidi = numStreams
+ p.initialMaxData = 1<<62 - 1
+ p.initialMaxStreamDataBidiRemote = dataLen
+ }, func(c *Config) {
+ c.MaxStreamWriteBufferSize = dataLen
+ })
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ // Create a number of streams, and write a bunch of data to them.
+ // The streams are not limited by flow control.
+ //
+ // The first stream we create is going to immediately consume all
+ // available congestion window.
+ //
+ // Once we've created all the remaining streams,
+ // we start sending acks back to open up the congestion window.
+ // We verify that all streams can make progress.
+ data := make([]byte, dataLen)
+ var streams []*Stream
+ for i := 0; i < numStreams; i++ {
+ s, err := tc.conn.NewStream(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ streams = append(streams, s)
+ if n, err := s.Write(data); n != len(data) || err != nil {
+ t.Fatalf("s.Write() = %v, %v; want %v, nil", n, err, len(data))
+ }
+ // Wait for the stream to finish writing whatever frames it can before
+ // congestion control blocks it.
+ tc.wait()
+ }
+
+ sent := make([]int64, len(streams))
+ for {
+ p := tc.readPacket()
+ if p == nil {
+ break
+ }
+ tc.writeFrames(packetType1RTT, debugFrameAck{
+ ranges: []i64range[packetNumber]{{0, p.num}},
+ })
+ for _, f := range p.frames {
+ sf, ok := f.(debugFrameStream)
+ if !ok {
+ t.Fatalf("got unexpected frame (want STREAM): %v", f)
+ }
+ if got, want := sf.off, sent[sf.id.num()]; got != want {
+ t.Fatalf("got frame: %v\nwant offset: %v", sf, want)
+ }
+ sent[sf.id.num()] = sf.off + int64(len(sf.data))
+ // Look at the amount of data sent by all streams, excluding the first one.
+ // (The first stream got a head start when it consumed the initial window.)
+ //
+ // We expect that difference between the streams making the most and least progress
+ // so far will be less than the maximum datagram size.
+ minSent := sent[1]
+ maxSent := sent[1]
+ for _, s := range sent[2:] {
+ minSent = min(minSent, s)
+ maxSent = max(maxSent, s)
+ }
+ const maxDelta = maxUDPPayloadSize
+ if d := maxSent - minSent; d > maxDelta {
+ t.Fatalf("stream data sent: %v; delta=%v, want delta <= %v", sent, d, maxDelta)
+ }
+ }
+ }
+ // Final check that every stream sent the full amount of data expected.
+ for num, s := range sent {
+ if s != dataLen {
+ t.Errorf("stream %v sent %v bytes, want %v", num, s, dataLen)
+ }
+ }
+}
+
+func TestStreamsShutdown(t *testing.T) {
+ // These tests verify that a stream is removed from the Conn's map of live streams
+ // after it is fully shut down.
+ //
+ // Each case consists of a setup step, after which one stream should exist,
+ // and a shutdown step, after which no streams should remain in the Conn.
+ for _, test := range []struct {
+ name string
+ side streamSide
+ styp streamType
+ setup func(*testing.T, *testConn, *Stream)
+ shutdown func(*testing.T, *testConn, *Stream)
+ }{{
+ name: "closed",
+ side: localStream,
+ styp: uniStream,
+ setup: func(t *testing.T, tc *testConn, s *Stream) {
+ s.Close()
+ },
+ shutdown: func(t *testing.T, tc *testConn, s *Stream) {
+ tc.writeAckForAll()
+ },
+ }, {
+ name: "local close",
+ side: localStream,
+ styp: bidiStream,
+ setup: func(t *testing.T, tc *testConn, s *Stream) {
+ tc.writeFrames(packetType1RTT, debugFrameResetStream{
+ id: s.id,
+ })
+ s.Close()
+ },
+ shutdown: func(t *testing.T, tc *testConn, s *Stream) {
+ tc.writeAckForAll()
+ },
+ }, {
+ name: "remote reset",
+ side: localStream,
+ styp: bidiStream,
+ setup: func(t *testing.T, tc *testConn, s *Stream) {
+ s.Close()
+ tc.wantIdle("all frames after Close are ignored")
+ tc.writeAckForAll()
+ },
+ shutdown: func(t *testing.T, tc *testConn, s *Stream) {
+ tc.writeFrames(packetType1RTT, debugFrameResetStream{
+ id: s.id,
+ })
+ },
+ }, {
+ name: "local close",
+ side: remoteStream,
+ styp: uniStream,
+ setup: func(t *testing.T, tc *testConn, s *Stream) {
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: s.id,
+ fin: true,
+ })
+ if n, err := s.Read(make([]byte, 16)); n != 0 || err != io.EOF {
+ t.Errorf("Read() = %v, %v; want 0, io.EOF", n, err)
+ }
+ },
+ shutdown: func(t *testing.T, tc *testConn, s *Stream) {
+ s.CloseRead()
+ },
+ }} {
+ name := fmt.Sprintf("%v/%v/%v", test.side, test.styp, test.name)
+ t.Run(name, func(t *testing.T) {
+ tc, s := newTestConnAndStream(t, serverSide, test.side, test.styp,
+ permissiveTransportParameters)
+ tc.ignoreFrame(frameTypeStreamBase)
+ tc.ignoreFrame(frameTypeStopSending)
+ test.setup(t, tc, s)
+ tc.wantIdle("conn should be idle after setup")
+ if got, want := len(tc.conn.streams.streams), 1; got != want {
+ t.Fatalf("after setup: %v streams in Conn's map; want %v", got, want)
+ }
+ test.shutdown(t, tc, s)
+ tc.wantIdle("conn should be idle after shutdown")
+ if got, want := len(tc.conn.streams.streams), 0; got != want {
+ t.Fatalf("after shutdown: %v streams in Conn's map; want %v", got, want)
+ }
+ })
+ }
+}
+
+func TestStreamsCreateAndCloseRemote(t *testing.T) {
+ // This test exercises creating new streams in response to frames
+ // from the peer, and cleaning up after streams are fully closed.
+ //
+ // It's overfitted to the current implementation, but works through
+ // a number of corner cases in that implementation.
+ //
+ // Disable verbose logging in this test: It sends a lot of packets,
+ // and they're not especially interesting on their own.
+ defer func(vv bool) {
+ *testVV = vv
+ }(*testVV)
+ *testVV = false
+ ctx := canceledContext()
+ tc := newTestConn(t, serverSide, permissiveTransportParameters)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ type op struct {
+ id streamID
+ }
+ type streamOp op
+ type resetOp op
+ type acceptOp op
+ const noStream = math.MaxInt64
+ stringID := func(id streamID) string {
+ return fmt.Sprintf("%v/%v", id.streamType(), id.num())
+ }
+ for _, op := range []any{
+ "opening bidi/5 implicitly opens bidi/0-4",
+ streamOp{newStreamID(clientSide, bidiStream, 5)},
+ acceptOp{newStreamID(clientSide, bidiStream, 5)},
+ "bidi/3 was implicitly opened",
+ streamOp{newStreamID(clientSide, bidiStream, 3)},
+ acceptOp{newStreamID(clientSide, bidiStream, 3)},
+ resetOp{newStreamID(clientSide, bidiStream, 3)},
+ "bidi/3 is done, frames for it are discarded",
+ streamOp{newStreamID(clientSide, bidiStream, 3)},
+ "open and close some uni streams as well",
+ streamOp{newStreamID(clientSide, uniStream, 0)},
+ acceptOp{newStreamID(clientSide, uniStream, 0)},
+ streamOp{newStreamID(clientSide, uniStream, 1)},
+ acceptOp{newStreamID(clientSide, uniStream, 1)},
+ streamOp{newStreamID(clientSide, uniStream, 2)},
+ acceptOp{newStreamID(clientSide, uniStream, 2)},
+ resetOp{newStreamID(clientSide, uniStream, 1)},
+ resetOp{newStreamID(clientSide, uniStream, 0)},
+ resetOp{newStreamID(clientSide, uniStream, 2)},
+ "closing an implicitly opened stream causes us to accept it",
+ resetOp{newStreamID(clientSide, bidiStream, 0)},
+ acceptOp{newStreamID(clientSide, bidiStream, 0)},
+ resetOp{newStreamID(clientSide, bidiStream, 1)},
+ acceptOp{newStreamID(clientSide, bidiStream, 1)},
+ resetOp{newStreamID(clientSide, bidiStream, 2)},
+ acceptOp{newStreamID(clientSide, bidiStream, 2)},
+ "stream bidi/3 was reset previously",
+ resetOp{newStreamID(clientSide, bidiStream, 3)},
+ resetOp{newStreamID(clientSide, bidiStream, 4)},
+ acceptOp{newStreamID(clientSide, bidiStream, 4)},
+ "stream bidi/5 was reset previously",
+ resetOp{newStreamID(clientSide, bidiStream, 5)},
+ "stream bidi/6 was not implicitly opened",
+ resetOp{newStreamID(clientSide, bidiStream, 6)},
+ acceptOp{newStreamID(clientSide, bidiStream, 6)},
+ } {
+ if _, ok := op.(acceptOp); !ok {
+ if s, err := tc.conn.AcceptStream(ctx); err == nil {
+ t.Fatalf("accepted stream %v, want none", stringID(s.id))
+ }
+ }
+ switch op := op.(type) {
+ case string:
+ t.Log("# " + op)
+ case streamOp:
+ t.Logf("open stream %v", stringID(op.id))
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: streamID(op.id),
+ })
+ case resetOp:
+ t.Logf("reset stream %v", stringID(op.id))
+ tc.writeFrames(packetType1RTT, debugFrameResetStream{
+ id: op.id,
+ })
+ case acceptOp:
+ s := tc.acceptStream()
+ if s.id != op.id {
+ t.Fatalf("accepted stream %v; want stream %v", stringID(s.id), stringID(op.id))
+ }
+ t.Logf("accepted stream %v", stringID(op.id))
+ // Immediately close the stream, so the stream becomes done when the
+ // peer closes its end.
+ s.Close()
+ }
+ p := tc.readPacket()
+ if p != nil {
+ tc.writeFrames(p.ptype, debugFrameAck{
+ ranges: []i64range[packetNumber]{{0, p.num + 1}},
+ })
+ }
+ }
+ // Every stream should be fully closed now.
+ // Check that we don't have any state left.
+ if got := len(tc.conn.streams.streams); got != 0 {
+ t.Fatalf("after test, len(tc.conn.streams.streams) = %v, want 0", got)
+ }
+ if tc.conn.streams.queueMeta.head != nil {
+ t.Fatalf("after test, stream send queue is not empty; should be")
+ }
+}
+
+func TestStreamsCreateConcurrency(t *testing.T) {
+ cli, srv := newLocalConnPair(t, &Config{}, &Config{})
+
+ srvdone := make(chan int)
+ go func() {
+ defer close(srvdone)
+ for streams := 0; ; streams++ {
+ s, err := srv.AcceptStream(context.Background())
+ if err != nil {
+ srvdone <- streams
+ return
+ }
+ s.Close()
+ }
+ }()
+
+ var wg sync.WaitGroup
+ const concurrency = 10
+ const streams = 10
+ for i := 0; i < concurrency; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for j := 0; j < streams; j++ {
+ s, err := cli.NewStream(context.Background())
+ if err != nil {
+ t.Errorf("NewStream: %v", err)
+ return
+ }
+ s.Flush()
+ _, err = io.ReadAll(s)
+ if err != nil {
+ t.Errorf("ReadAll: %v", err)
+ }
+ s.Close()
+ }
+ }()
+ }
+ wg.Wait()
+
+ cli.Abort(nil)
+ srv.Abort(nil)
+ if got, want := <-srvdone, concurrency*streams; got != want {
+ t.Errorf("accepted %v streams, want %v", got, want)
+ }
+}
+
+func TestStreamsPTOWithImplicitStream(t *testing.T) {
+ ctx := canceledContext()
+ tc := newTestConn(t, serverSide, permissiveTransportParameters)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+
+ // Peer creates stream 1, and implicitly creates stream 0.
+ tc.writeFrames(packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 1),
+ })
+
+ // We accept stream 1 and write data to it.
+ data := []byte("data")
+ s, err := tc.conn.AcceptStream(ctx)
+ if err != nil {
+ t.Fatalf("conn.AcceptStream() = %v, want stream", err)
+ }
+ s.Write(data)
+ s.Flush()
+ tc.wantFrame("data written to stream",
+ packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 1),
+ data: data,
+ })
+
+ // PTO expires, and the data is resent.
+ const pto = true
+ tc.triggerLossOrPTO(packetType1RTT, pto)
+ tc.wantFrame("data resent after PTO expires",
+ packetType1RTT, debugFrameStream{
+ id: newStreamID(clientSide, bidiStream, 1),
+ data: data,
+ })
+}
diff --git a/quic/conn_test.go b/quic/conn_test.go
new file mode 100644
index 000000000..f4f1818a6
--- /dev/null
+++ b/quic/conn_test.go
@@ -0,0 +1,1164 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/netip"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/net/quic/qlog"
+)
+
+var (
+ testVV = flag.Bool("vv", false, "even more verbose test output")
+ qlogdir = flag.String("qlog", "", "write qlog logs to directory")
+)
+
+func TestConnTestConn(t *testing.T) {
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ if got, want := tc.timeUntilEvent(), defaultMaxIdleTimeout; got != want {
+ t.Errorf("new conn timeout=%v, want %v (max_idle_timeout)", got, want)
+ }
+
+ ranAt, _ := runAsync(tc, func(ctx context.Context) (when time.Time, _ error) {
+ tc.conn.runOnLoop(ctx, func(now time.Time, c *Conn) {
+ when = now
+ })
+ return
+ }).result()
+ if !ranAt.Equal(tc.endpoint.now) {
+ t.Errorf("func ran on loop at %v, want %v", ranAt, tc.endpoint.now)
+ }
+ tc.wait()
+
+ nextTime := tc.endpoint.now.Add(defaultMaxIdleTimeout / 2)
+ tc.advanceTo(nextTime)
+ ranAt, _ = runAsync(tc, func(ctx context.Context) (when time.Time, _ error) {
+ tc.conn.runOnLoop(ctx, func(now time.Time, c *Conn) {
+ when = now
+ })
+ return
+ }).result()
+ if !ranAt.Equal(nextTime) {
+ t.Errorf("func ran on loop at %v, want %v", ranAt, nextTime)
+ }
+ tc.wait()
+
+ tc.advanceToTimer()
+ if got := tc.conn.lifetime.state; got != connStateDone {
+ t.Errorf("after advancing to idle timeout, conn state = %v, want done", got)
+ }
+}
+
+type testDatagram struct {
+ packets []*testPacket
+ paddedSize int
+ addr netip.AddrPort
+}
+
+func (d testDatagram) String() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, "datagram with %v packets", len(d.packets))
+ if d.paddedSize > 0 {
+ fmt.Fprintf(&b, " (padded to %v bytes)", d.paddedSize)
+ }
+ b.WriteString(":")
+ for _, p := range d.packets {
+ b.WriteString("\n")
+ b.WriteString(p.String())
+ }
+ return b.String()
+}
+
+type testPacket struct {
+ ptype packetType
+ header byte
+ version uint32
+ num packetNumber
+ keyPhaseBit bool
+ keyNumber int
+ dstConnID []byte
+ srcConnID []byte
+ token []byte
+ originalDstConnID []byte // used for encoding Retry packets
+ frames []debugFrame
+}
+
+func (p testPacket) String() string {
+ var b strings.Builder
+ fmt.Fprintf(&b, " %v %v", p.ptype, p.num)
+ if p.version != 0 {
+ fmt.Fprintf(&b, " version=%v", p.version)
+ }
+ if p.srcConnID != nil {
+ fmt.Fprintf(&b, " src={%x}", p.srcConnID)
+ }
+ if p.dstConnID != nil {
+ fmt.Fprintf(&b, " dst={%x}", p.dstConnID)
+ }
+ if p.token != nil {
+ fmt.Fprintf(&b, " token={%x}", p.token)
+ }
+ for _, f := range p.frames {
+ fmt.Fprintf(&b, "\n %v", f)
+ }
+ return b.String()
+}
+
+// maxTestKeyPhases is the maximum number of 1-RTT keys we'll generate in a test.
+const maxTestKeyPhases = 3
+
+// A testConn is a Conn whose external interactions (sending and receiving packets,
+// setting timers) can be manipulated in tests.
+type testConn struct {
+ t *testing.T
+ conn *Conn
+ endpoint *testEndpoint
+ timer time.Time
+ timerLastFired time.Time
+ idlec chan struct{} // only accessed on the conn's loop
+
+ // Keys are distinct from the conn's keys,
+ // because the test may know about keys before the conn does.
+ // For example, when sending a datagram with coalesced
+ // Initial and Handshake packets to a client conn,
+ // we use Handshake keys to encrypt the packet.
+ // The client only acquires those keys when it processes
+ // the Initial packet.
+ keysInitial fixedKeyPair
+ keysHandshake fixedKeyPair
+ rkeyAppData test1RTTKeys
+ wkeyAppData test1RTTKeys
+ rsecrets [numberSpaceCount]keySecret
+ wsecrets [numberSpaceCount]keySecret
+
+ // testConn uses a test hook to snoop on the conn's TLS events.
+ // CRYPTO data produced by the conn's QUICConn is placed in
+ // cryptoDataOut.
+ //
+ // The peerTLSConn is a QUICConn representing the peer.
+ // CRYPTO data produced by the conn is written to peerTLSConn,
+ // and data produced by peerTLSConn is placed in cryptoDataIn.
+ cryptoDataOut map[tls.QUICEncryptionLevel][]byte
+ cryptoDataIn map[tls.QUICEncryptionLevel][]byte
+ peerTLSConn *tls.QUICConn
+
+ // Information about the conn's (fake) peer.
+ peerConnID []byte // source conn id of peer's packets
+ peerNextPacketNum [numberSpaceCount]packetNumber // next packet number to use
+
+ // Datagrams, packets, and frames sent by the conn,
+ // but not yet processed by the test.
+ sentDatagrams [][]byte
+ sentPackets []*testPacket
+ sentFrames []debugFrame
+ lastDatagram *testDatagram
+ lastPacket *testPacket
+
+ recvDatagram chan *datagram
+
+ // Transport parameters sent by the conn.
+ sentTransportParameters *transportParameters
+
+ // Frame types to ignore in tests.
+ ignoreFrames map[byte]bool
+
+ // Values to set in packets sent to the conn.
+ sendKeyNumber int
+ sendKeyPhaseBit bool
+
+ asyncTestState
+}
+
+type test1RTTKeys struct {
+ hdr headerKey
+ pkt [maxTestKeyPhases]packetKey
+}
+
+type keySecret struct {
+ suite uint16
+ secret []byte
+}
+
+// newTestConn creates a Conn for testing.
+//
+// The Conn's event loop is controlled by the test,
+// allowing test code to access Conn state directly
+// by first ensuring the loop goroutine is idle.
+func newTestConn(t *testing.T, side connSide, opts ...any) *testConn {
+ t.Helper()
+ config := &Config{
+ TLSConfig: newTestTLSConfig(side),
+ StatelessResetKey: testStatelessResetKey,
+ QLogLogger: slog.New(qlog.NewJSONHandler(qlog.HandlerOptions{
+ Level: QLogLevelFrame,
+ Dir: *qlogdir,
+ })),
+ }
+ var cids newServerConnIDs
+ if side == serverSide {
+ // The initial connection ID for the server is chosen by the client.
+ cids.srcConnID = testPeerConnID(0)
+ cids.dstConnID = testPeerConnID(-1)
+ cids.originalDstConnID = cids.dstConnID
+ }
+ var configTransportParams []func(*transportParameters)
+ var configTestConn []func(*testConn)
+ for _, o := range opts {
+ switch o := o.(type) {
+ case func(*Config):
+ o(config)
+ case func(*tls.Config):
+ o(config.TLSConfig)
+ case func(cids *newServerConnIDs):
+ o(&cids)
+ case func(p *transportParameters):
+ configTransportParams = append(configTransportParams, o)
+ case func(p *testConn):
+ configTestConn = append(configTestConn, o)
+ default:
+ t.Fatalf("unknown newTestConn option %T", o)
+ }
+ }
+
+ endpoint := newTestEndpoint(t, config)
+ endpoint.configTransportParams = configTransportParams
+ endpoint.configTestConn = configTestConn
+ conn, err := endpoint.e.newConn(
+ endpoint.now,
+ config,
+ side,
+ cids,
+ "",
+ netip.MustParseAddrPort("127.0.0.1:443"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ tc := endpoint.conns[conn]
+ tc.wait()
+ return tc
+}
+
+func newTestConnForConn(t *testing.T, endpoint *testEndpoint, conn *Conn) *testConn {
+ t.Helper()
+ tc := &testConn{
+ t: t,
+ endpoint: endpoint,
+ conn: conn,
+ peerConnID: testPeerConnID(0),
+ ignoreFrames: map[byte]bool{
+ frameTypePadding: true, // ignore PADDING by default
+ },
+ cryptoDataOut: make(map[tls.QUICEncryptionLevel][]byte),
+ cryptoDataIn: make(map[tls.QUICEncryptionLevel][]byte),
+ recvDatagram: make(chan *datagram),
+ }
+ t.Cleanup(tc.cleanup)
+ for _, f := range endpoint.configTestConn {
+ f(tc)
+ }
+ conn.testHooks = (*testConnHooks)(tc)
+
+ if endpoint.peerTLSConn != nil {
+ tc.peerTLSConn = endpoint.peerTLSConn
+ endpoint.peerTLSConn = nil
+ return tc
+ }
+
+ peerProvidedParams := defaultTransportParameters()
+ peerProvidedParams.initialSrcConnID = testPeerConnID(0)
+ if conn.side == clientSide {
+ peerProvidedParams.originalDstConnID = testLocalConnID(-1)
+ }
+ for _, f := range endpoint.configTransportParams {
+ f(&peerProvidedParams)
+ }
+
+ peerQUICConfig := &tls.QUICConfig{TLSConfig: newTestTLSConfig(conn.side.peer())}
+ if conn.side == clientSide {
+ tc.peerTLSConn = tls.QUICServer(peerQUICConfig)
+ } else {
+ tc.peerTLSConn = tls.QUICClient(peerQUICConfig)
+ }
+ tc.peerTLSConn.SetTransportParameters(marshalTransportParameters(peerProvidedParams))
+ tc.peerTLSConn.Start(context.Background())
+ t.Cleanup(func() {
+ tc.peerTLSConn.Close()
+ })
+
+ return tc
+}
+
+// advance causes time to pass.
+func (tc *testConn) advance(d time.Duration) {
+ tc.t.Helper()
+ tc.endpoint.advance(d)
+}
+
+// advanceTo sets the current time.
+func (tc *testConn) advanceTo(now time.Time) {
+ tc.t.Helper()
+ tc.endpoint.advanceTo(now)
+}
+
+// advanceToTimer sets the current time to the time of the Conn's next timer event.
+func (tc *testConn) advanceToTimer() {
+ if tc.timer.IsZero() {
+ tc.t.Fatalf("advancing to timer, but timer is not set")
+ }
+ tc.advanceTo(tc.timer)
+}
+
+func (tc *testConn) timerDelay() time.Duration {
+ if tc.timer.IsZero() {
+ return math.MaxInt64 // infinite
+ }
+ if tc.timer.Before(tc.endpoint.now) {
+ return 0
+ }
+ return tc.timer.Sub(tc.endpoint.now)
+}
+
+const infiniteDuration = time.Duration(math.MaxInt64)
+
+// timeUntilEvent returns the amount of time until the next connection event.
+func (tc *testConn) timeUntilEvent() time.Duration {
+ if tc.timer.IsZero() {
+ return infiniteDuration
+ }
+ if tc.timer.Before(tc.endpoint.now) {
+ return 0
+ }
+ return tc.timer.Sub(tc.endpoint.now)
+}
+
+// wait blocks until the conn becomes idle.
+// The conn is idle when it is blocked waiting for a packet to arrive or a timer to expire.
+// Tests shouldn't need to call wait directly.
+// testConn methods that wake the Conn event loop will call wait for them.
+func (tc *testConn) wait() {
+ tc.t.Helper()
+ idlec := make(chan struct{})
+ fail := false
+ tc.conn.sendMsg(func(now time.Time, c *Conn) {
+ if tc.idlec != nil {
+ tc.t.Errorf("testConn.wait called concurrently")
+ fail = true
+ close(idlec)
+ } else {
+ // nextMessage will close idlec.
+ tc.idlec = idlec
+ }
+ })
+ select {
+ case <-idlec:
+ case <-tc.conn.donec:
+ // We may have async ops that can proceed now that the conn is done.
+ tc.wakeAsync()
+ }
+ if fail {
+ panic(fail)
+ }
+}
+
+func (tc *testConn) cleanup() {
+ if tc.conn == nil {
+ return
+ }
+ tc.conn.exit()
+ <-tc.conn.donec
+}
+
+func (tc *testConn) acceptStream() *Stream {
+ tc.t.Helper()
+ s, err := tc.conn.AcceptStream(canceledContext())
+ if err != nil {
+ tc.t.Fatalf("conn.AcceptStream() = %v, want stream", err)
+ }
+ s.SetReadContext(canceledContext())
+ s.SetWriteContext(canceledContext())
+ return s
+}
+
+func logDatagram(t *testing.T, text string, d *testDatagram) {
+ t.Helper()
+ if !*testVV {
+ return
+ }
+ pad := ""
+ if d.paddedSize > 0 {
+ pad = fmt.Sprintf(" (padded to %v)", d.paddedSize)
+ }
+ t.Logf("%v datagram%v", text, pad)
+ for _, p := range d.packets {
+ var s string
+ switch p.ptype {
+ case packetType1RTT:
+ s = fmt.Sprintf(" %v pnum=%v", p.ptype, p.num)
+ default:
+ s = fmt.Sprintf(" %v pnum=%v ver=%v dst={%x} src={%x}", p.ptype, p.num, p.version, p.dstConnID, p.srcConnID)
+ }
+ if p.token != nil {
+ s += fmt.Sprintf(" token={%x}", p.token)
+ }
+ if p.keyPhaseBit {
+ s += " KeyPhase"
+ }
+ if p.keyNumber != 0 {
+ s += fmt.Sprintf(" keynum=%v", p.keyNumber)
+ }
+ t.Log(s)
+ for _, f := range p.frames {
+ t.Logf(" %v", f)
+ }
+ }
+}
+
+// write sends the Conn a datagram.
+func (tc *testConn) write(d *testDatagram) {
+ tc.t.Helper()
+ tc.endpoint.writeDatagram(d)
+}
+
+// writeFrame sends the Conn a datagram containing the given frames.
+func (tc *testConn) writeFrames(ptype packetType, frames ...debugFrame) {
+ tc.t.Helper()
+ space := spaceForPacketType(ptype)
+ dstConnID := tc.conn.connIDState.local[0].cid
+ if tc.conn.connIDState.local[0].seq == -1 && ptype != packetTypeInitial {
+ // Only use the transient connection ID in Initial packets.
+ dstConnID = tc.conn.connIDState.local[1].cid
+ }
+ d := &testDatagram{
+ packets: []*testPacket{{
+ ptype: ptype,
+ num: tc.peerNextPacketNum[space],
+ keyNumber: tc.sendKeyNumber,
+ keyPhaseBit: tc.sendKeyPhaseBit,
+ frames: frames,
+ version: quicVersion1,
+ dstConnID: dstConnID,
+ srcConnID: tc.peerConnID,
+ }},
+ addr: tc.conn.peerAddr,
+ }
+ if ptype == packetTypeInitial && tc.conn.side == serverSide {
+ d.paddedSize = 1200
+ }
+ tc.write(d)
+}
+
+// writeAckForAll sends the Conn a datagram containing an ack for all packets up to the
+// last one received.
+func (tc *testConn) writeAckForAll() {
+ tc.t.Helper()
+ if tc.lastPacket == nil {
+ return
+ }
+ tc.writeFrames(tc.lastPacket.ptype, debugFrameAck{
+ ranges: []i64range[packetNumber]{{0, tc.lastPacket.num + 1}},
+ })
+}
+
+// writeAckForLatest sends the Conn a datagram containing an ack for the
+// most recent packet received.
+func (tc *testConn) writeAckForLatest() {
+ tc.t.Helper()
+ if tc.lastPacket == nil {
+ return
+ }
+ tc.writeFrames(tc.lastPacket.ptype, debugFrameAck{
+ ranges: []i64range[packetNumber]{{tc.lastPacket.num, tc.lastPacket.num + 1}},
+ })
+}
+
+// ignoreFrame hides frames of the given type sent by the Conn.
+func (tc *testConn) ignoreFrame(frameType byte) {
+ tc.ignoreFrames[frameType] = true
+}
+
+// readDatagram reads the next datagram sent by the Conn.
+// It returns nil if the Conn has no more datagrams to send at this time.
+func (tc *testConn) readDatagram() *testDatagram {
+ tc.t.Helper()
+ tc.wait()
+ tc.sentPackets = nil
+ tc.sentFrames = nil
+ buf := tc.endpoint.read()
+ if buf == nil {
+ return nil
+ }
+ d := parseTestDatagram(tc.t, tc.endpoint, tc, buf)
+ // Log the datagram before removing ignored frames.
+ // When things go wrong, it's useful to see all the frames.
+ logDatagram(tc.t, "-> conn under test sends", d)
+ typeForFrame := func(f debugFrame) byte {
+ // This is very clunky, and points at a problem
+ // in how we specify what frames to ignore in tests.
+ //
+ // We mark frames to ignore using the frame type,
+ // but we've got a debugFrame data structure here.
+ // Perhaps we should be ignoring frames by debugFrame
+ // type instead: tc.ignoreFrame[debugFrameAck]().
+ switch f := f.(type) {
+ case debugFramePadding:
+ return frameTypePadding
+ case debugFramePing:
+ return frameTypePing
+ case debugFrameAck:
+ return frameTypeAck
+ case debugFrameResetStream:
+ return frameTypeResetStream
+ case debugFrameStopSending:
+ return frameTypeStopSending
+ case debugFrameCrypto:
+ return frameTypeCrypto
+ case debugFrameNewToken:
+ return frameTypeNewToken
+ case debugFrameStream:
+ return frameTypeStreamBase
+ case debugFrameMaxData:
+ return frameTypeMaxData
+ case debugFrameMaxStreamData:
+ return frameTypeMaxStreamData
+ case debugFrameMaxStreams:
+ if f.streamType == bidiStream {
+ return frameTypeMaxStreamsBidi
+ } else {
+ return frameTypeMaxStreamsUni
+ }
+ case debugFrameDataBlocked:
+ return frameTypeDataBlocked
+ case debugFrameStreamDataBlocked:
+ return frameTypeStreamDataBlocked
+ case debugFrameStreamsBlocked:
+ if f.streamType == bidiStream {
+ return frameTypeStreamsBlockedBidi
+ } else {
+ return frameTypeStreamsBlockedUni
+ }
+ case debugFrameNewConnectionID:
+ return frameTypeNewConnectionID
+ case debugFrameRetireConnectionID:
+ return frameTypeRetireConnectionID
+ case debugFramePathChallenge:
+ return frameTypePathChallenge
+ case debugFramePathResponse:
+ return frameTypePathResponse
+ case debugFrameConnectionCloseTransport:
+ return frameTypeConnectionCloseTransport
+ case debugFrameConnectionCloseApplication:
+ return frameTypeConnectionCloseApplication
+ case debugFrameHandshakeDone:
+ return frameTypeHandshakeDone
+ }
+ panic(fmt.Errorf("unhandled frame type %T", f))
+ }
+ for _, p := range d.packets {
+ var frames []debugFrame
+ for _, f := range p.frames {
+ if !tc.ignoreFrames[typeForFrame(f)] {
+ frames = append(frames, f)
+ }
+ }
+ p.frames = frames
+ }
+ tc.lastDatagram = d
+ return d
+}
+
+// readPacket reads the next packet sent by the Conn.
+// It returns nil if the Conn has no more packets to send at this time.
+func (tc *testConn) readPacket() *testPacket {
+ tc.t.Helper()
+ for len(tc.sentPackets) == 0 {
+ d := tc.readDatagram()
+ if d == nil {
+ return nil
+ }
+ for _, p := range d.packets {
+ if len(p.frames) == 0 {
+ tc.lastPacket = p
+ continue
+ }
+ tc.sentPackets = append(tc.sentPackets, p)
+ }
+ }
+ p := tc.sentPackets[0]
+ tc.sentPackets = tc.sentPackets[1:]
+ tc.lastPacket = p
+ return p
+}
+
+// readFrame reads the next frame sent by the Conn.
+// It returns nil if the Conn has no more frames to send at this time.
+func (tc *testConn) readFrame() (debugFrame, packetType) {
+ tc.t.Helper()
+ for len(tc.sentFrames) == 0 {
+ p := tc.readPacket()
+ if p == nil {
+ return nil, packetTypeInvalid
+ }
+ tc.sentFrames = p.frames
+ }
+ f := tc.sentFrames[0]
+ tc.sentFrames = tc.sentFrames[1:]
+ return f, tc.lastPacket.ptype
+}
+
+// wantDatagram indicates that we expect the Conn to send a datagram.
+func (tc *testConn) wantDatagram(expectation string, want *testDatagram) {
+ tc.t.Helper()
+ got := tc.readDatagram()
+ if !datagramEqual(got, want) {
+ tc.t.Fatalf("%v:\ngot datagram: %v\nwant datagram: %v", expectation, got, want)
+ }
+}
+
+func datagramEqual(a, b *testDatagram) bool {
+ if a == nil && b == nil {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ if a.paddedSize != b.paddedSize ||
+ a.addr != b.addr ||
+ len(a.packets) != len(b.packets) {
+ return false
+ }
+ for i := range a.packets {
+ if !packetEqual(a.packets[i], b.packets[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// wantPacket indicates that we expect the Conn to send a packet.
+func (tc *testConn) wantPacket(expectation string, want *testPacket) {
+ tc.t.Helper()
+ got := tc.readPacket()
+ if !packetEqual(got, want) {
+ tc.t.Fatalf("%v:\ngot packet: %v\nwant packet: %v", expectation, got, want)
+ }
+}
+
+func packetEqual(a, b *testPacket) bool {
+ if a == nil && b == nil {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ ac := *a
+ ac.frames = nil
+ ac.header = 0
+ bc := *b
+ bc.frames = nil
+ bc.header = 0
+ if !reflect.DeepEqual(ac, bc) {
+ return false
+ }
+ if len(a.frames) != len(b.frames) {
+ return false
+ }
+ for i := range a.frames {
+ if !frameEqual(a.frames[i], b.frames[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// wantFrame indicates that we expect the Conn to send a frame.
+func (tc *testConn) wantFrame(expectation string, wantType packetType, want debugFrame) {
+ tc.t.Helper()
+ got, gotType := tc.readFrame()
+ if got == nil {
+ tc.t.Fatalf("%v:\nconnection is idle\nwant %v frame: %v", expectation, wantType, want)
+ }
+ if gotType != wantType {
+ tc.t.Fatalf("%v:\ngot %v packet, want %v\ngot frame: %v", expectation, gotType, wantType, got)
+ }
+ if !frameEqual(got, want) {
+ tc.t.Fatalf("%v:\ngot frame: %v\nwant frame: %v", expectation, got, want)
+ }
+}
+
+func frameEqual(a, b debugFrame) bool {
+ switch af := a.(type) {
+ case debugFrameConnectionCloseTransport:
+ bf, ok := b.(debugFrameConnectionCloseTransport)
+ return ok && af.code == bf.code
+ }
+ return reflect.DeepEqual(a, b)
+}
+
+// wantFrameType indicates that we expect the Conn to send a frame,
+// although we don't care about the contents.
+func (tc *testConn) wantFrameType(expectation string, wantType packetType, want debugFrame) {
+ tc.t.Helper()
+ got, gotType := tc.readFrame()
+ if got == nil {
+ tc.t.Fatalf("%v:\nconnection is idle\nwant %v frame: %v", expectation, wantType, want)
+ }
+ if gotType != wantType {
+ tc.t.Fatalf("%v:\ngot %v packet, want %v\ngot frame: %v", expectation, gotType, wantType, got)
+ }
+ if reflect.TypeOf(got) != reflect.TypeOf(want) {
+ tc.t.Fatalf("%v:\ngot frame: %v\nwant frame of type: %v", expectation, got, want)
+ }
+}
+
+// wantIdle indicates that we expect the Conn to not send any more frames.
+func (tc *testConn) wantIdle(expectation string) {
+ tc.t.Helper()
+ switch {
+ case len(tc.sentFrames) > 0:
+ tc.t.Fatalf("expect: %v\nunexpectedly got: %v", expectation, tc.sentFrames[0])
+ case len(tc.sentPackets) > 0:
+ tc.t.Fatalf("expect: %v\nunexpectedly got: %v", expectation, tc.sentPackets[0])
+ }
+ if f, _ := tc.readFrame(); f != nil {
+ tc.t.Fatalf("expect: %v\nunexpectedly got: %v", expectation, f)
+ }
+}
+
+func encodeTestPacket(t *testing.T, tc *testConn, p *testPacket, pad int) []byte {
+ t.Helper()
+ var w packetWriter
+ w.reset(1200)
+ var pnumMaxAcked packetNumber
+ switch p.ptype {
+ case packetTypeRetry:
+ return encodeRetryPacket(p.originalDstConnID, retryPacket{
+ srcConnID: p.srcConnID,
+ dstConnID: p.dstConnID,
+ token: p.token,
+ })
+ case packetType1RTT:
+ w.start1RTTPacket(p.num, pnumMaxAcked, p.dstConnID)
+ default:
+ w.startProtectedLongHeaderPacket(pnumMaxAcked, longPacket{
+ ptype: p.ptype,
+ version: p.version,
+ num: p.num,
+ dstConnID: p.dstConnID,
+ srcConnID: p.srcConnID,
+ extra: p.token,
+ })
+ }
+ for _, f := range p.frames {
+ f.write(&w)
+ }
+ w.appendPaddingTo(pad)
+ if p.ptype != packetType1RTT {
+ var k fixedKeys
+ if tc == nil {
+ if p.ptype == packetTypeInitial {
+ k = initialKeys(p.dstConnID, serverSide).r
+ } else {
+ t.Fatalf("sending %v packet with no conn", p.ptype)
+ }
+ } else {
+ switch p.ptype {
+ case packetTypeInitial:
+ k = tc.keysInitial.w
+ case packetTypeHandshake:
+ k = tc.keysHandshake.w
+ }
+ }
+ if !k.isSet() {
+ t.Fatalf("sending %v packet with no write key", p.ptype)
+ }
+ w.finishProtectedLongHeaderPacket(pnumMaxAcked, k, longPacket{
+ ptype: p.ptype,
+ version: p.version,
+ num: p.num,
+ dstConnID: p.dstConnID,
+ srcConnID: p.srcConnID,
+ extra: p.token,
+ })
+ } else {
+ if tc == nil || !tc.wkeyAppData.hdr.isSet() {
+ t.Fatalf("sending 1-RTT packet with no write key")
+ }
+ // Somewhat hackish: Generate a temporary updatingKeyPair that will
+ // always use our desired key phase.
+ k := &updatingKeyPair{
+ w: updatingKeys{
+ hdr: tc.wkeyAppData.hdr,
+ pkt: [2]packetKey{
+ tc.wkeyAppData.pkt[p.keyNumber],
+ tc.wkeyAppData.pkt[p.keyNumber],
+ },
+ },
+ updateAfter: maxPacketNumber,
+ }
+ if p.keyPhaseBit {
+ k.phase |= keyPhaseBit
+ }
+ w.finish1RTTPacket(p.num, pnumMaxAcked, p.dstConnID, k)
+ }
+ return w.datagram()
+}
+
+func parseTestDatagram(t *testing.T, te *testEndpoint, tc *testConn, buf []byte) *testDatagram {
+ t.Helper()
+ bufSize := len(buf)
+ d := &testDatagram{}
+ size := len(buf)
+ for len(buf) > 0 {
+ if buf[0] == 0 {
+ d.paddedSize = bufSize
+ break
+ }
+ ptype := getPacketType(buf)
+ switch ptype {
+ case packetTypeRetry:
+ retry, ok := parseRetryPacket(buf, te.lastInitialDstConnID)
+ if !ok {
+ t.Fatalf("could not parse %v packet", ptype)
+ }
+ return &testDatagram{
+ packets: []*testPacket{{
+ ptype: packetTypeRetry,
+ dstConnID: retry.dstConnID,
+ srcConnID: retry.srcConnID,
+ token: retry.token,
+ }},
+ }
+ case packetTypeInitial, packetTypeHandshake:
+ var k fixedKeys
+ if tc == nil {
+ if ptype == packetTypeInitial {
+ p, _ := parseGenericLongHeaderPacket(buf)
+ k = initialKeys(p.srcConnID, serverSide).w
+ } else {
+ t.Fatalf("reading %v packet with no conn", ptype)
+ }
+ } else {
+ switch ptype {
+ case packetTypeInitial:
+ k = tc.keysInitial.r
+ case packetTypeHandshake:
+ k = tc.keysHandshake.r
+ }
+ }
+ if !k.isSet() {
+ t.Fatalf("reading %v packet with no read key", ptype)
+ }
+ var pnumMax packetNumber // TODO: Track packet numbers.
+ p, n := parseLongHeaderPacket(buf, k, pnumMax)
+ if n < 0 {
+ t.Fatalf("packet parse error")
+ }
+ frames, err := parseTestFrames(t, p.payload)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var token []byte
+ if ptype == packetTypeInitial && len(p.extra) > 0 {
+ token = p.extra
+ }
+ d.packets = append(d.packets, &testPacket{
+ ptype: p.ptype,
+ header: buf[0],
+ version: p.version,
+ num: p.num,
+ dstConnID: p.dstConnID,
+ srcConnID: p.srcConnID,
+ token: token,
+ frames: frames,
+ })
+ buf = buf[n:]
+ case packetType1RTT:
+ if tc == nil || !tc.rkeyAppData.hdr.isSet() {
+ t.Fatalf("reading 1-RTT packet with no read key")
+ }
+ var pnumMax packetNumber // TODO: Track packet numbers.
+ pnumOff := 1 + len(tc.peerConnID)
+ // Try unprotecting the packet with the first maxTestKeyPhases keys.
+ var phase int
+ var pnum packetNumber
+ var hdr []byte
+ var pay []byte
+ var err error
+ for phase = 0; phase < maxTestKeyPhases; phase++ {
+ b := append([]byte{}, buf...)
+ hdr, pay, pnum, err = tc.rkeyAppData.hdr.unprotect(b, pnumOff, pnumMax)
+ if err != nil {
+ t.Fatalf("1-RTT packet header parse error")
+ }
+ k := tc.rkeyAppData.pkt[phase]
+ pay, err = k.unprotect(hdr, pay, pnum)
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ t.Fatalf("1-RTT packet payload parse error")
+ }
+ frames, err := parseTestFrames(t, pay)
+ if err != nil {
+ t.Fatal(err)
+ }
+ d.packets = append(d.packets, &testPacket{
+ ptype: packetType1RTT,
+ header: hdr[0],
+ num: pnum,
+ dstConnID: hdr[1:][:len(tc.peerConnID)],
+ keyPhaseBit: hdr[0]&keyPhaseBit != 0,
+ keyNumber: phase,
+ frames: frames,
+ })
+ buf = buf[len(buf):]
+ default:
+ t.Fatalf("unhandled packet type %v", ptype)
+ }
+ }
+ // This is rather hackish: If the last frame in the last packet
+ // in the datagram is PADDING, then remove it and record
+ // the padded size in the testDatagram.paddedSize.
+ //
+ // This makes it easier to write a test that expects a datagram
+ // padded to 1200 bytes.
+ if len(d.packets) > 0 && len(d.packets[len(d.packets)-1].frames) > 0 {
+ p := d.packets[len(d.packets)-1]
+ f := p.frames[len(p.frames)-1]
+ if _, ok := f.(debugFramePadding); ok {
+ p.frames = p.frames[:len(p.frames)-1]
+ d.paddedSize = size
+ }
+ }
+ return d
+}
+
+func parseTestFrames(t *testing.T, payload []byte) ([]debugFrame, error) {
+ t.Helper()
+ var frames []debugFrame
+ for len(payload) > 0 {
+ f, n := parseDebugFrame(payload)
+ if n < 0 {
+ return nil, errors.New("error parsing frames")
+ }
+ frames = append(frames, f)
+ payload = payload[n:]
+ }
+ return frames, nil
+}
+
+func spaceForPacketType(ptype packetType) numberSpace {
+ switch ptype {
+ case packetTypeInitial:
+ return initialSpace
+ case packetType0RTT:
+ panic("TODO: packetType0RTT")
+ case packetTypeHandshake:
+ return handshakeSpace
+ case packetTypeRetry:
+ panic("retry packets have no number space")
+ case packetType1RTT:
+ return appDataSpace
+ }
+ panic("unknown packet type")
+}
+
+// testConnHooks implements connTestHooks.
+type testConnHooks testConn
+
+func (tc *testConnHooks) init() {
+ tc.conn.keysAppData.updateAfter = maxPacketNumber // disable key updates
+ tc.keysInitial.r = tc.conn.keysInitial.w
+ tc.keysInitial.w = tc.conn.keysInitial.r
+ if tc.conn.side == serverSide {
+ tc.endpoint.acceptQueue = append(tc.endpoint.acceptQueue, (*testConn)(tc))
+ }
+}
+
+// handleTLSEvent processes TLS events generated by
+// the connection under test's tls.QUICConn.
+//
+// We maintain a second tls.QUICConn representing the peer,
+// and feed the TLS handshake data into it.
+//
+// We stash TLS handshake data from both sides in the testConn,
+// where it can be used by tests.
+//
+// We snoop packet protection keys out of the tls.QUICConns,
+// and verify that both sides of the connection are getting
+// matching keys.
+func (tc *testConnHooks) handleTLSEvent(e tls.QUICEvent) {
+ checkKey := func(typ string, secrets *[numberSpaceCount]keySecret, e tls.QUICEvent) {
+ var space numberSpace
+ switch {
+ case e.Level == tls.QUICEncryptionLevelHandshake:
+ space = handshakeSpace
+ case e.Level == tls.QUICEncryptionLevelApplication:
+ space = appDataSpace
+ default:
+ tc.t.Errorf("unexpected encryption level %v", e.Level)
+ return
+ }
+ if secrets[space].secret == nil {
+ secrets[space].suite = e.Suite
+ secrets[space].secret = append([]byte{}, e.Data...)
+ } else if secrets[space].suite != e.Suite || !bytes.Equal(secrets[space].secret, e.Data) {
+ tc.t.Errorf("%v key mismatch for level for level %v", typ, e.Level)
+ }
+ }
+ setAppDataKey := func(suite uint16, secret []byte, k *test1RTTKeys) {
+ k.hdr.init(suite, secret)
+ for i := 0; i < len(k.pkt); i++ {
+ k.pkt[i].init(suite, secret)
+ secret = updateSecret(suite, secret)
+ }
+ }
+ switch e.Kind {
+ case tls.QUICSetReadSecret:
+ checkKey("write", &tc.wsecrets, e)
+ switch e.Level {
+ case tls.QUICEncryptionLevelHandshake:
+ tc.keysHandshake.w.init(e.Suite, e.Data)
+ case tls.QUICEncryptionLevelApplication:
+ setAppDataKey(e.Suite, e.Data, &tc.wkeyAppData)
+ }
+ case tls.QUICSetWriteSecret:
+ checkKey("read", &tc.rsecrets, e)
+ switch e.Level {
+ case tls.QUICEncryptionLevelHandshake:
+ tc.keysHandshake.r.init(e.Suite, e.Data)
+ case tls.QUICEncryptionLevelApplication:
+ setAppDataKey(e.Suite, e.Data, &tc.rkeyAppData)
+ }
+ case tls.QUICWriteData:
+ tc.cryptoDataOut[e.Level] = append(tc.cryptoDataOut[e.Level], e.Data...)
+ tc.peerTLSConn.HandleData(e.Level, e.Data)
+ }
+ for {
+ e := tc.peerTLSConn.NextEvent()
+ switch e.Kind {
+ case tls.QUICNoEvent:
+ return
+ case tls.QUICSetReadSecret:
+ checkKey("write", &tc.rsecrets, e)
+ switch e.Level {
+ case tls.QUICEncryptionLevelHandshake:
+ tc.keysHandshake.r.init(e.Suite, e.Data)
+ case tls.QUICEncryptionLevelApplication:
+ setAppDataKey(e.Suite, e.Data, &tc.rkeyAppData)
+ }
+ case tls.QUICSetWriteSecret:
+ checkKey("read", &tc.wsecrets, e)
+ switch e.Level {
+ case tls.QUICEncryptionLevelHandshake:
+ tc.keysHandshake.w.init(e.Suite, e.Data)
+ case tls.QUICEncryptionLevelApplication:
+ setAppDataKey(e.Suite, e.Data, &tc.wkeyAppData)
+ }
+ case tls.QUICWriteData:
+ tc.cryptoDataIn[e.Level] = append(tc.cryptoDataIn[e.Level], e.Data...)
+ case tls.QUICTransportParameters:
+ p, err := unmarshalTransportParams(e.Data)
+ if err != nil {
+ tc.t.Logf("sent unparseable transport parameters %x %v", e.Data, err)
+ } else {
+ tc.sentTransportParameters = &p
+ }
+ }
+ }
+}
+
+// nextMessage is called by the Conn's event loop to request its next event.
+func (tc *testConnHooks) nextMessage(msgc chan any, timer time.Time) (now time.Time, m any) {
+ tc.timer = timer
+ for {
+ if !timer.IsZero() && !timer.After(tc.endpoint.now) {
+ if timer.Equal(tc.timerLastFired) {
+ // If the connection timer fires at time T, the Conn should take some
+ // action to advance the timer into the future. If the Conn reschedules
+ // the timer for the same time, it isn't making progress and we have a bug.
+ tc.t.Errorf("connection timer spinning; now=%v timer=%v", tc.endpoint.now, timer)
+ } else {
+ tc.timerLastFired = timer
+ return tc.endpoint.now, timerEvent{}
+ }
+ }
+ select {
+ case m := <-msgc:
+ return tc.endpoint.now, m
+ default:
+ }
+ if !tc.wakeAsync() {
+ break
+ }
+ }
+ // If the message queue is empty, then the conn is idle.
+ if tc.idlec != nil {
+ idlec := tc.idlec
+ tc.idlec = nil
+ close(idlec)
+ }
+ m = <-msgc
+ return tc.endpoint.now, m
+}
+
+func (tc *testConnHooks) newConnID(seq int64) ([]byte, error) {
+ return testLocalConnID(seq), nil
+}
+
+func (tc *testConnHooks) timeNow() time.Time {
+ return tc.endpoint.now
+}
+
+// testLocalConnID returns the connection ID with a given sequence number
+// used by a Conn under test.
+func testLocalConnID(seq int64) []byte {
+ cid := make([]byte, connIDLen)
+ copy(cid, []byte{0xc0, 0xff, 0xee})
+ cid[len(cid)-1] = byte(seq)
+ return cid
+}
+
+// testPeerConnID returns the connection ID with a given sequence number
+// used by the fake peer of a Conn under test.
+func testPeerConnID(seq int64) []byte {
+ // Use a different length than we choose for our own conn ids,
+ // to help catch any bad assumptions.
+ return []byte{0xbe, 0xee, 0xff, byte(seq)}
+}
+
+func testPeerStatelessResetToken(seq int64) statelessResetToken {
+ return statelessResetToken{
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, byte(seq),
+ }
+}
+
+// canceledContext returns a canceled Context.
+//
+// Functions which take a context prefer progress over cancelation.
+// For example, a read with a canceled context will return data if any is available.
+// Tests use canceled contexts to perform non-blocking operations.
+func canceledContext() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ return ctx
+}
diff --git a/quic/crypto_stream.go b/quic/crypto_stream.go
new file mode 100644
index 000000000..a4dcb32eb
--- /dev/null
+++ b/quic/crypto_stream.go
@@ -0,0 +1,141 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+// "Implementations MUST support buffering at least 4096 bytes of data
+// received in out-of-order CRYPTO frames."
+// https://www.rfc-editor.org/rfc/rfc9000.html#section-7.5-2
+//
+// 4096 is too small for real-world cases, however, so we allow more.
+const cryptoBufferSize = 1 << 20
+
+// A cryptoStream is the stream of data passed in CRYPTO frames.
+// There is one cryptoStream per packet number space.
+type cryptoStream struct {
+ // CRYPTO data received from the peer.
+ in pipe
+ inset rangeset[int64] // bytes received
+
+ // CRYPTO data queued for transmission to the peer.
+ out pipe
+ outunsent rangeset[int64] // bytes in need of sending
+ outacked rangeset[int64] // bytes acked by peer
+}
+
+// handleCrypto processes data received in a CRYPTO frame.
+func (s *cryptoStream) handleCrypto(off int64, b []byte, f func([]byte) error) error {
+ end := off + int64(len(b))
+ if end-s.inset.min() > cryptoBufferSize {
+ return localTransportError{
+ code: errCryptoBufferExceeded,
+ reason: "crypto buffer exceeded",
+ }
+ }
+ s.inset.add(off, end)
+ if off == s.in.start {
+ // Fast path: This is the next chunk of data in the stream,
+ // so just handle it immediately.
+ if err := f(b); err != nil {
+ return err
+ }
+ s.in.discardBefore(end)
+ } else {
+ // This is either data we've already processed,
+ // data we can't process yet, or a mix of both.
+ s.in.writeAt(b, off)
+ }
+ // s.in.start is the next byte in sequence.
+ // If it's in s.inset, we have bytes to provide.
+ // If it isn't, we don't--we're either out of data,
+ // or only have data that comes after the next byte.
+ if !s.inset.contains(s.in.start) {
+ return nil
+ }
+ // size is the size of the first contiguous chunk of bytes
+ // that have not been processed yet.
+ size := int(s.inset[0].end - s.in.start)
+ if size <= 0 {
+ return nil
+ }
+ err := s.in.read(s.in.start, size, f)
+ s.in.discardBefore(s.inset[0].end)
+ return err
+}
+
+// write queues data for sending to the peer.
+// It does not block or limit the amount of buffered data.
+// QUIC connections don't communicate the amount of CRYPTO data they are willing to buffer,
+// so we send what we have and the peer can close the connection if it is too much.
+func (s *cryptoStream) write(b []byte) {
+ start := s.out.end
+ s.out.writeAt(b, start)
+ s.outunsent.add(start, s.out.end)
+}
+
+// ackOrLoss reports that a CRYPTO frame sent by us has been acknowledged by the peer, or lost.
+func (s *cryptoStream) ackOrLoss(start, end int64, fate packetFate) {
+ switch fate {
+ case packetAcked:
+ s.outacked.add(start, end)
+ s.outunsent.sub(start, end)
+ // If this ack is for data at the start of the send buffer, we can now discard it.
+ if s.outacked.contains(s.out.start) {
+ s.out.discardBefore(s.outacked[0].end)
+ }
+ case packetLost:
+ // Mark everything lost, but not previously acked, as needing retransmission.
+ // We do this by adding all the lost bytes to outunsent, and then
+ // removing everything already acked.
+ s.outunsent.add(start, end)
+ for _, a := range s.outacked {
+ s.outunsent.sub(a.start, a.end)
+ }
+ }
+}
+
+// dataToSend reports what data should be sent in CRYPTO frames to the peer.
+// It calls f with each range of data to send.
+// f uses sendData to get the bytes to send, and returns the number of bytes sent.
+// dataToSend calls f until no data is left, or f returns 0.
+//
+// This function is unusually indirect (why not just return a []byte,
+// or implement io.Reader?).
+//
+// Returning a []byte to the caller either requires that we store the
+// data to send contiguously (which we don't), allocate a temporary buffer
+// and copy into it (inefficient), or return less data than we have available
+// (requires complexity to avoid unnecessarily breaking data across frames).
+//
+// Accepting a []byte from the caller (io.Reader) makes packet construction
+// difficult. Since CRYPTO data is encoded with a varint length prefix, the
+// location of the data depends on the length of the data. (We could hardcode
+// a 2-byte length, of course.)
+//
+// Instead, we tell the caller how much data there is, the caller figures out where
+// to put it (and possibly decides that it doesn't have space for this data
+// in the packet after all), and the caller then makes a separate call to
+// copy the data it wants into position.
+func (s *cryptoStream) dataToSend(pto bool, f func(off, size int64) (sent int64)) {
+ for {
+ off, size := dataToSend(s.out.start, s.out.end, s.outunsent, s.outacked, pto)
+ if size == 0 {
+ return
+ }
+ n := f(off, size)
+ if n == 0 || pto {
+ return
+ }
+ }
+}
+
+// sendData fills b with data to send to the peer, starting at off,
+// and marks the data as sent. The caller must have already ascertained
+// that there is data to send in this region using dataToSend.
+func (s *cryptoStream) sendData(off int64, b []byte) {
+ s.out.copy(off, b)
+ s.outunsent.sub(off, off+int64(len(b)))
+}
diff --git a/quic/crypto_stream_test.go b/quic/crypto_stream_test.go
new file mode 100644
index 000000000..6bee8bb9f
--- /dev/null
+++ b/quic/crypto_stream_test.go
@@ -0,0 +1,280 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "crypto/rand"
+ "reflect"
+ "testing"
+)
+
+func TestCryptoStreamReceive(t *testing.T) {
+ data := make([]byte, 1<<20)
+ rand.Read(data) // doesn't need to be crypto/rand, but non-deprecated and harmless
+ type frame struct {
+ start int64
+ end int64
+ want int
+ }
+ for _, test := range []struct {
+ name string
+ frames []frame
+ }{{
+ name: "linear",
+ frames: []frame{{
+ start: 0,
+ end: 1000,
+ want: 1000,
+ }, {
+ start: 1000,
+ end: 2000,
+ want: 2000,
+ }, {
+ // larger than any realistic packet can hold
+ start: 2000,
+ end: 1 << 20,
+ want: 1 << 20,
+ }},
+ }, {
+ name: "out of order",
+ frames: []frame{{
+ start: 1000,
+ end: 2000,
+ }, {
+ start: 2000,
+ end: 3000,
+ }, {
+ start: 0,
+ end: 1000,
+ want: 3000,
+ }},
+ }, {
+ name: "resent",
+ frames: []frame{{
+ start: 0,
+ end: 1000,
+ want: 1000,
+ }, {
+ start: 0,
+ end: 1000,
+ want: 1000,
+ }, {
+ start: 1000,
+ end: 2000,
+ want: 2000,
+ }, {
+ start: 0,
+ end: 1000,
+ want: 2000,
+ }, {
+ start: 1000,
+ end: 2000,
+ want: 2000,
+ }},
+ }, {
+ name: "overlapping",
+ frames: []frame{{
+ start: 0,
+ end: 1000,
+ want: 1000,
+ }, {
+ start: 3000,
+ end: 4000,
+ want: 1000,
+ }, {
+ start: 2000,
+ end: 3000,
+ want: 1000,
+ }, {
+ start: 1000,
+ end: 3000,
+ want: 4000,
+ }},
+ }, {
+ name: "resent consumed data",
+ frames: []frame{{
+ start: 0,
+ end: 1000,
+ want: 1000,
+ }, {
+ start: 1000,
+ end: 2000,
+ want: 2000,
+ }, {
+ start: 0,
+ end: 1000,
+ want: 2000,
+ }},
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ var s cryptoStream
+ var got []byte
+ for _, f := range test.frames {
+ t.Logf("receive [%v,%v)", f.start, f.end)
+ s.handleCrypto(
+ f.start,
+ data[f.start:f.end],
+ func(b []byte) error {
+ t.Logf("got new bytes [%v,%v)", len(got), len(got)+len(b))
+ got = append(got, b...)
+ return nil
+ },
+ )
+ if len(got) != f.want {
+ t.Fatalf("have bytes [0,%v), want [0,%v)", len(got), f.want)
+ }
+ for i := range got {
+ if got[i] != data[i] {
+ t.Fatalf("byte %v of received data = %v, want %v", i, got[i], data[i])
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestCryptoStreamSends(t *testing.T) {
+ data := make([]byte, 1<<20)
+ rand.Read(data) // doesn't need to be crypto/rand, but non-deprecated and harmless
+ type (
+ sendOp i64range[int64]
+ ackOp i64range[int64]
+ lossOp i64range[int64]
+ )
+ for _, test := range []struct {
+ name string
+ size int64
+ ops []any
+ wantSend []i64range[int64]
+ wantPTOSend []i64range[int64]
+ }{{
+ name: "writes with data remaining",
+ size: 4000,
+ ops: []any{
+ sendOp{0, 1000},
+ sendOp{1000, 2000},
+ sendOp{2000, 3000},
+ },
+ wantSend: []i64range[int64]{
+ {3000, 4000},
+ },
+ wantPTOSend: []i64range[int64]{
+ {0, 4000},
+ },
+ }, {
+ name: "lost data is resent",
+ size: 4000,
+ ops: []any{
+ sendOp{0, 1000},
+ sendOp{1000, 2000},
+ sendOp{2000, 3000},
+ sendOp{3000, 4000},
+ lossOp{1000, 2000},
+ lossOp{3000, 4000},
+ },
+ wantSend: []i64range[int64]{
+ {1000, 2000},
+ {3000, 4000},
+ },
+ wantPTOSend: []i64range[int64]{
+ {0, 4000},
+ },
+ }, {
+ name: "acked data at start of range",
+ size: 4000,
+ ops: []any{
+ sendOp{0, 4000},
+ ackOp{0, 1000},
+ ackOp{1000, 2000},
+ ackOp{2000, 3000},
+ },
+ wantSend: nil,
+ wantPTOSend: []i64range[int64]{
+ {3000, 4000},
+ },
+ }, {
+ name: "acked data is not resent on pto",
+ size: 4000,
+ ops: []any{
+ sendOp{0, 4000},
+ ackOp{1000, 2000},
+ },
+ wantSend: nil,
+ wantPTOSend: []i64range[int64]{
+ {0, 1000},
+ },
+ }, {
+ // This is an unusual, but possible scenario:
+ // Data is sent, resent, one of the two sends is acked, and the other is lost.
+ name: "acked and then lost data is not resent",
+ size: 4000,
+ ops: []any{
+ sendOp{0, 4000},
+ sendOp{1000, 2000}, // resent, no-op
+ ackOp{1000, 2000},
+ lossOp{1000, 2000},
+ },
+ wantSend: nil,
+ wantPTOSend: []i64range[int64]{
+ {0, 1000},
+ },
+ }, {
+ // The opposite of the above scenario: data is marked lost, and then acked
+ // before being resent.
+ name: "lost and then acked data is not resent",
+ size: 4000,
+ ops: []any{
+ sendOp{0, 4000},
+ sendOp{1000, 2000}, // resent, no-op
+ lossOp{1000, 2000},
+ ackOp{1000, 2000},
+ },
+ wantSend: nil,
+ wantPTOSend: []i64range[int64]{
+ {0, 1000},
+ },
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ var s cryptoStream
+ s.write(data[:test.size])
+ for _, op := range test.ops {
+ switch op := op.(type) {
+ case sendOp:
+ t.Logf("send [%v,%v)", op.start, op.end)
+ b := make([]byte, op.end-op.start)
+ s.sendData(op.start, b)
+ case ackOp:
+ t.Logf("ack [%v,%v)", op.start, op.end)
+ s.ackOrLoss(op.start, op.end, packetAcked)
+ case lossOp:
+ t.Logf("loss [%v,%v)", op.start, op.end)
+ s.ackOrLoss(op.start, op.end, packetLost)
+ default:
+ t.Fatalf("unhandled type %T", op)
+ }
+ }
+ var gotSend []i64range[int64]
+ s.dataToSend(true, func(off, size int64) (wrote int64) {
+ gotSend = append(gotSend, i64range[int64]{off, off + size})
+ return 0
+ })
+ if !reflect.DeepEqual(gotSend, test.wantPTOSend) {
+ t.Fatalf("got data to send on PTO: %v, want %v", gotSend, test.wantPTOSend)
+ }
+ gotSend = nil
+ s.dataToSend(false, func(off, size int64) (wrote int64) {
+ gotSend = append(gotSend, i64range[int64]{off, off + size})
+ b := make([]byte, size)
+ s.sendData(off, b)
+ return int64(len(b))
+ })
+ if !reflect.DeepEqual(gotSend, test.wantSend) {
+ t.Fatalf("got data to send: %v, want %v", gotSend, test.wantSend)
+ }
+ })
+ }
+}
diff --git a/quic/dgram.go b/quic/dgram.go
new file mode 100644
index 000000000..615589373
--- /dev/null
+++ b/quic/dgram.go
@@ -0,0 +1,55 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "net/netip"
+ "sync"
+)
+
+type datagram struct {
+ b []byte
+ localAddr netip.AddrPort
+ peerAddr netip.AddrPort
+ ecn ecnBits
+}
+
+// Explicit Congestion Notification bits.
+//
+// https://www.rfc-editor.org/rfc/rfc3168.html#section-5
+type ecnBits byte
+
+const (
+ ecnMask = 0b000000_11
+ ecnNotECT = 0b000000_00
+ ecnECT1 = 0b000000_01
+ ecnECT0 = 0b000000_10
+ ecnCE = 0b000000_11
+)
+
+var datagramPool = sync.Pool{
+ New: func() any {
+ return &datagram{
+ b: make([]byte, maxUDPPayloadSize),
+ }
+ },
+}
+
+func newDatagram() *datagram {
+ m := datagramPool.Get().(*datagram)
+ *m = datagram{
+ b: m.b[:cap(m.b)],
+ }
+ return m
+}
+
+func (m *datagram) recycle() {
+ if cap(m.b) != maxUDPPayloadSize {
+ return
+ }
+ datagramPool.Put(m)
+}
diff --git a/quic/doc.go b/quic/doc.go
new file mode 100644
index 000000000..2fd10f087
--- /dev/null
+++ b/quic/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quic implements the QUIC protocol.
+//
+// This package is a work in progress.
+// It is not ready for production usage.
+// Its API is subject to change without notice.
+//
+// This package is low-level.
+// Most users will use it indirectly through an HTTP/3 implementation.
+//
+// # Usage
+//
+// An [Endpoint] sends and receives traffic on a network address.
+// Create an Endpoint to either accept inbound QUIC connections
+// or create outbound ones.
+//
+// A [Conn] is a QUIC connection.
+//
+// A [Stream] is a QUIC stream, an ordered, reliable byte stream.
+//
+// # Cancelation
+//
+// All blocking operations may be canceled using a context.Context.
+// When performing an operation with a canceled context, the operation
+// will succeed if doing so does not require blocking. For example,
+// reading from a stream will return data when buffered data is available,
+// even if the stream context is canceled.
+//
+// # Limitations
+//
+// This package is a work in progress.
+// Known limitations include:
+//
+// - Performance is untuned.
+// - 0-RTT is not supported.
+// - Address migration is not supported.
+// - Server preferred addresses are not supported.
+// - The latency spin bit is not supported.
+// - Stream send/receive windows are configurable,
+// but are fixed and do not adapt to available throughput.
+// - Path MTU discovery is not implemented.
+package quic
diff --git a/quic/endpoint.go b/quic/endpoint.go
new file mode 100644
index 000000000..a55336b24
--- /dev/null
+++ b/quic/endpoint.go
@@ -0,0 +1,461 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "net"
+ "net/netip"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// An Endpoint handles QUIC traffic on a network address.
+// It can accept inbound connections or create outbound ones.
+//
+// Multiple goroutines may invoke methods on an Endpoint simultaneously.
+type Endpoint struct {
+ listenConfig *Config
+ packetConn packetConn
+ testHooks endpointTestHooks
+ resetGen statelessResetTokenGenerator
+ retry retryState
+
+ acceptQueue queue[*Conn] // new inbound connections
+ connsMap connsMap // only accessed by the listen loop
+
+ connsMu sync.Mutex
+ conns map[*Conn]struct{}
+ closing bool // set when Close is called
+ closec chan struct{} // closed when the listen loop exits
+}
+
+type endpointTestHooks interface {
+ timeNow() time.Time
+ newConn(c *Conn)
+}
+
+// A packetConn is the interface to sending and receiving UDP packets.
+type packetConn interface {
+ Close() error
+ LocalAddr() netip.AddrPort
+ Read(f func(*datagram))
+ Write(datagram) error
+}
+
+// Listen listens on a local network address.
+//
+// The config is used for connections accepted by the endpoint.
+// If the config is nil, the endpoint will not accept connections.
+func Listen(network, address string, listenConfig *Config) (*Endpoint, error) {
+ if listenConfig != nil && listenConfig.TLSConfig == nil {
+ return nil, errors.New("TLSConfig is not set")
+ }
+ a, err := net.ResolveUDPAddr(network, address)
+ if err != nil {
+ return nil, err
+ }
+ udpConn, err := net.ListenUDP(network, a)
+ if err != nil {
+ return nil, err
+ }
+ pc, err := newNetUDPConn(udpConn)
+ if err != nil {
+ return nil, err
+ }
+ return newEndpoint(pc, listenConfig, nil)
+}
+
+func newEndpoint(pc packetConn, config *Config, hooks endpointTestHooks) (*Endpoint, error) {
+ e := &Endpoint{
+ listenConfig: config,
+ packetConn: pc,
+ testHooks: hooks,
+ conns: make(map[*Conn]struct{}),
+ acceptQueue: newQueue[*Conn](),
+ closec: make(chan struct{}),
+ }
+ var statelessResetKey [32]byte
+ if config != nil {
+ statelessResetKey = config.StatelessResetKey
+ }
+ e.resetGen.init(statelessResetKey)
+ e.connsMap.init()
+ if config != nil && config.RequireAddressValidation {
+ if err := e.retry.init(); err != nil {
+ return nil, err
+ }
+ }
+ go e.listen()
+ return e, nil
+}
+
+// LocalAddr returns the local network address.
+func (e *Endpoint) LocalAddr() netip.AddrPort {
+ return e.packetConn.LocalAddr()
+}
+
+// Close closes the Endpoint.
+// Any blocked operations on the Endpoint or associated Conns and Streams will be unblocked
+// and return errors.
+//
+// Close aborts every open connection.
+// Data in stream read and write buffers is discarded.
+// It waits for the peers of any open connection to acknowledge the connection has been closed.
+func (e *Endpoint) Close(ctx context.Context) error {
+ e.acceptQueue.close(errors.New("endpoint closed"))
+
+ // It isn't safe to call Conn.Abort or conn.exit with connsMu held,
+ // so copy the list of conns.
+ var conns []*Conn
+ e.connsMu.Lock()
+ if !e.closing {
+ e.closing = true // setting e.closing prevents new conns from being created
+ for c := range e.conns {
+ conns = append(conns, c)
+ }
+ if len(e.conns) == 0 {
+ e.packetConn.Close()
+ }
+ }
+ e.connsMu.Unlock()
+
+ for _, c := range conns {
+ c.Abort(localTransportError{code: errNo})
+ }
+ select {
+ case <-e.closec:
+ case <-ctx.Done():
+ for _, c := range conns {
+ c.exit()
+ }
+ return ctx.Err()
+ }
+ return nil
+}
+
+// Accept waits for and returns the next connection.
+func (e *Endpoint) Accept(ctx context.Context) (*Conn, error) {
+ return e.acceptQueue.get(ctx, nil)
+}
+
+// Dial creates and returns a connection to a network address.
+// The config cannot be nil.
+func (e *Endpoint) Dial(ctx context.Context, network, address string, config *Config) (*Conn, error) {
+ u, err := net.ResolveUDPAddr(network, address)
+ if err != nil {
+ return nil, err
+ }
+ addr := u.AddrPort()
+ addr = netip.AddrPortFrom(addr.Addr().Unmap(), addr.Port())
+ c, err := e.newConn(time.Now(), config, clientSide, newServerConnIDs{}, address, addr)
+ if err != nil {
+ return nil, err
+ }
+ if err := c.waitReady(ctx); err != nil {
+ c.Abort(nil)
+ return nil, err
+ }
+ return c, nil
+}
+
+func (e *Endpoint) newConn(now time.Time, config *Config, side connSide, cids newServerConnIDs, peerHostname string, peerAddr netip.AddrPort) (*Conn, error) {
+ e.connsMu.Lock()
+ defer e.connsMu.Unlock()
+ if e.closing {
+ return nil, errors.New("endpoint closed")
+ }
+ c, err := newConn(now, side, cids, peerHostname, peerAddr, config, e)
+ if err != nil {
+ return nil, err
+ }
+ e.conns[c] = struct{}{}
+ return c, nil
+}
+
+// serverConnEstablished is called by a conn when the handshake completes
+// for an inbound (serverSide) connection.
+func (e *Endpoint) serverConnEstablished(c *Conn) {
+ e.acceptQueue.put(c)
+}
+
+// connDrained is called by a conn when it leaves the draining state,
+// either when the peer acknowledges connection closure or the drain timeout expires.
+func (e *Endpoint) connDrained(c *Conn) {
+ var cids [][]byte
+ for i := range c.connIDState.local {
+ cids = append(cids, c.connIDState.local[i].cid)
+ }
+ var tokens []statelessResetToken
+ for i := range c.connIDState.remote {
+ tokens = append(tokens, c.connIDState.remote[i].resetToken)
+ }
+ e.connsMap.updateConnIDs(func(conns *connsMap) {
+ for _, cid := range cids {
+ conns.retireConnID(c, cid)
+ }
+ for _, token := range tokens {
+ conns.retireResetToken(c, token)
+ }
+ })
+ e.connsMu.Lock()
+ defer e.connsMu.Unlock()
+ delete(e.conns, c)
+ if e.closing && len(e.conns) == 0 {
+ e.packetConn.Close()
+ }
+}
+
+func (e *Endpoint) listen() {
+ defer close(e.closec)
+ e.packetConn.Read(func(m *datagram) {
+ if e.connsMap.updateNeeded.Load() {
+ e.connsMap.applyUpdates()
+ }
+ e.handleDatagram(m)
+ })
+}
+
+func (e *Endpoint) handleDatagram(m *datagram) {
+ dstConnID, ok := dstConnIDForDatagram(m.b)
+ if !ok {
+ m.recycle()
+ return
+ }
+ c := e.connsMap.byConnID[string(dstConnID)]
+ if c == nil {
+ // TODO: Move this branch into a separate goroutine to avoid blocking
+ // the endpoint while processing packets.
+ e.handleUnknownDestinationDatagram(m)
+ return
+ }
+
+ // TODO: This can block the endpoint while waiting for the conn to accept the dgram.
+ // Think about buffering between the receive loop and the conn.
+ c.sendMsg(m)
+}
+
+func (e *Endpoint) handleUnknownDestinationDatagram(m *datagram) {
+ defer func() {
+ if m != nil {
+ m.recycle()
+ }
+ }()
+ const minimumValidPacketSize = 21
+ if len(m.b) < minimumValidPacketSize {
+ return
+ }
+ var now time.Time
+ if e.testHooks != nil {
+ now = e.testHooks.timeNow()
+ } else {
+ now = time.Now()
+ }
+ // Check to see if this is a stateless reset.
+ var token statelessResetToken
+ copy(token[:], m.b[len(m.b)-len(token):])
+ if c := e.connsMap.byResetToken[token]; c != nil {
+ c.sendMsg(func(now time.Time, c *Conn) {
+ c.handleStatelessReset(now, token)
+ })
+ return
+ }
+ // If this is a 1-RTT packet, there's nothing productive we can do with it.
+ // Send a stateless reset if possible.
+ if !isLongHeader(m.b[0]) {
+ e.maybeSendStatelessReset(m.b, m.peerAddr)
+ return
+ }
+ p, ok := parseGenericLongHeaderPacket(m.b)
+ if !ok || len(m.b) < paddedInitialDatagramSize {
+ return
+ }
+ switch p.version {
+ case quicVersion1:
+ case 0:
+ // Version Negotiation for an unknown connection.
+ return
+ default:
+ // Unknown version.
+ e.sendVersionNegotiation(p, m.peerAddr)
+ return
+ }
+ if getPacketType(m.b) != packetTypeInitial {
+ // This packet isn't trying to create a new connection.
+ // It might be associated with some connection we've lost state for.
+ // We are technically permitted to send a stateless reset for
+ // a long-header packet, but this isn't generally useful. See:
+ // https://www.rfc-editor.org/rfc/rfc9000#section-10.3-16
+ return
+ }
+ if e.listenConfig == nil {
+ // We are not configured to accept connections.
+ return
+ }
+ cids := newServerConnIDs{
+ srcConnID: p.srcConnID,
+ dstConnID: p.dstConnID,
+ }
+ if e.listenConfig.RequireAddressValidation {
+ var ok bool
+ cids.retrySrcConnID = p.dstConnID
+ cids.originalDstConnID, ok = e.validateInitialAddress(now, p, m.peerAddr)
+ if !ok {
+ return
+ }
+ } else {
+ cids.originalDstConnID = p.dstConnID
+ }
+ var err error
+ c, err := e.newConn(now, e.listenConfig, serverSide, cids, "", m.peerAddr)
+ if err != nil {
+ // The accept queue is probably full.
+ // We could send a CONNECTION_CLOSE to the peer to reject the connection.
+ // Currently, we just drop the datagram.
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-5.2.2-5
+ return
+ }
+ c.sendMsg(m)
+ m = nil // don't recycle, sendMsg takes ownership
+}
+
+func (e *Endpoint) maybeSendStatelessReset(b []byte, peerAddr netip.AddrPort) {
+ if !e.resetGen.canReset {
+ // Config.StatelessResetKey isn't set, so we don't send stateless resets.
+ return
+ }
+ // The smallest possible valid packet a peer can send us is:
+ // 1 byte of header
+ // connIDLen bytes of destination connection ID
+ // 1 byte of packet number
+ // 1 byte of payload
+ // 16 bytes AEAD expansion
+ if len(b) < 1+connIDLen+1+1+16 {
+ return
+ }
+ // TODO: Rate limit stateless resets.
+ cid := b[1:][:connIDLen]
+ token := e.resetGen.tokenForConnID(cid)
+ // We want to generate a stateless reset that is as short as possible,
+ // but long enough to be difficult to distinguish from a 1-RTT packet.
+ //
+ // The minimal 1-RTT packet is:
+ // 1 byte of header
+ // 0-20 bytes of destination connection ID
+ // 1-4 bytes of packet number
+ // 1 byte of payload
+ // 16 bytes AEAD expansion
+ //
+ // Assuming the maximum possible connection ID and packet number size,
+ // this gives 1 + 20 + 4 + 1 + 16 = 42 bytes.
+ //
+ // We also must generate a stateless reset that is shorter than the datagram
+ // we are responding to, in order to ensure that reset loops terminate.
+ //
+ // See: https://www.rfc-editor.org/rfc/rfc9000#section-10.3
+ size := min(len(b)-1, 42)
+ // Reuse the input buffer for generating the stateless reset.
+ b = b[:size]
+ rand.Read(b[:len(b)-statelessResetTokenLen])
+ b[0] &^= headerFormLong // clear long header bit
+ b[0] |= fixedBit // set fixed bit
+ copy(b[len(b)-statelessResetTokenLen:], token[:])
+ e.sendDatagram(datagram{
+ b: b,
+ peerAddr: peerAddr,
+ })
+}
+
+func (e *Endpoint) sendVersionNegotiation(p genericLongPacket, peerAddr netip.AddrPort) {
+ m := newDatagram()
+ m.b = appendVersionNegotiation(m.b[:0], p.srcConnID, p.dstConnID, quicVersion1)
+ m.peerAddr = peerAddr
+ e.sendDatagram(*m)
+ m.recycle()
+}
+
+func (e *Endpoint) sendConnectionClose(in genericLongPacket, peerAddr netip.AddrPort, code transportError) {
+ keys := initialKeys(in.dstConnID, serverSide)
+ var w packetWriter
+ p := longPacket{
+ ptype: packetTypeInitial,
+ version: quicVersion1,
+ num: 0,
+ dstConnID: in.srcConnID,
+ srcConnID: in.dstConnID,
+ }
+ const pnumMaxAcked = 0
+ w.reset(paddedInitialDatagramSize)
+ w.startProtectedLongHeaderPacket(pnumMaxAcked, p)
+ w.appendConnectionCloseTransportFrame(code, 0, "")
+ w.finishProtectedLongHeaderPacket(pnumMaxAcked, keys.w, p)
+ buf := w.datagram()
+ if len(buf) == 0 {
+ return
+ }
+ e.sendDatagram(datagram{
+ b: buf,
+ peerAddr: peerAddr,
+ })
+}
+
+func (e *Endpoint) sendDatagram(dgram datagram) error {
+ return e.packetConn.Write(dgram)
+}
+
+// A connsMap is an endpoint's mapping of conn ids and reset tokens to conns.
+type connsMap struct {
+ byConnID map[string]*Conn
+ byResetToken map[statelessResetToken]*Conn
+
+ updateMu sync.Mutex
+ updateNeeded atomic.Bool
+ updates []func(*connsMap)
+}
+
+func (m *connsMap) init() {
+ m.byConnID = map[string]*Conn{}
+ m.byResetToken = map[statelessResetToken]*Conn{}
+}
+
+func (m *connsMap) addConnID(c *Conn, cid []byte) {
+ m.byConnID[string(cid)] = c
+}
+
+func (m *connsMap) retireConnID(c *Conn, cid []byte) {
+ delete(m.byConnID, string(cid))
+}
+
+func (m *connsMap) addResetToken(c *Conn, token statelessResetToken) {
+ m.byResetToken[token] = c
+}
+
+func (m *connsMap) retireResetToken(c *Conn, token statelessResetToken) {
+ delete(m.byResetToken, token)
+}
+
+func (m *connsMap) updateConnIDs(f func(*connsMap)) {
+ m.updateMu.Lock()
+ defer m.updateMu.Unlock()
+ m.updates = append(m.updates, f)
+ m.updateNeeded.Store(true)
+}
+
+// applyUpdates is called by the datagram receive loop to update its connection ID map.
+func (m *connsMap) applyUpdates() {
+ m.updateMu.Lock()
+ defer m.updateMu.Unlock()
+ for _, f := range m.updates {
+ f(m)
+ }
+ clear(m.updates)
+ m.updates = m.updates[:0]
+ m.updateNeeded.Store(false)
+}
diff --git a/quic/endpoint_test.go b/quic/endpoint_test.go
new file mode 100644
index 000000000..d5f436e6d
--- /dev/null
+++ b/quic/endpoint_test.go
@@ -0,0 +1,330 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "io"
+ "log/slog"
+ "net/netip"
+ "testing"
+ "time"
+
+ "golang.org/x/net/quic/qlog"
+)
+
+func TestConnect(t *testing.T) {
+ newLocalConnPair(t, &Config{}, &Config{})
+}
+
+func TestStreamTransfer(t *testing.T) {
+ ctx := context.Background()
+ cli, srv := newLocalConnPair(t, &Config{}, &Config{})
+ data := makeTestData(1 << 20)
+
+ srvdone := make(chan struct{})
+ go func() {
+ defer close(srvdone)
+ s, err := srv.AcceptStream(ctx)
+ if err != nil {
+ t.Errorf("AcceptStream: %v", err)
+ return
+ }
+ b, err := io.ReadAll(s)
+ if err != nil {
+ t.Errorf("io.ReadAll(s): %v", err)
+ return
+ }
+ if !bytes.Equal(b, data) {
+ t.Errorf("read data mismatch (got %v bytes, want %v", len(b), len(data))
+ }
+ if err := s.Close(); err != nil {
+ t.Errorf("s.Close() = %v", err)
+ }
+ }()
+
+ s, err := cli.NewSendOnlyStream(ctx)
+ if err != nil {
+ t.Fatalf("NewStream: %v", err)
+ }
+ n, err := io.Copy(s, bytes.NewBuffer(data))
+ if n != int64(len(data)) || err != nil {
+ t.Fatalf("io.Copy(s, data) = %v, %v; want %v, nil", n, err, len(data))
+ }
+ if err := s.Close(); err != nil {
+ t.Fatalf("s.Close() = %v", err)
+ }
+}
+
+func newLocalConnPair(t testing.TB, conf1, conf2 *Config) (clientConn, serverConn *Conn) {
+ t.Helper()
+ ctx := context.Background()
+ e1 := newLocalEndpoint(t, serverSide, conf1)
+ e2 := newLocalEndpoint(t, clientSide, conf2)
+ conf2 = makeTestConfig(conf2, clientSide)
+ c2, err := e2.Dial(ctx, "udp", e1.LocalAddr().String(), conf2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c1, err := e1.Accept(ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return c2, c1
+}
+
+func newLocalEndpoint(t testing.TB, side connSide, conf *Config) *Endpoint {
+ t.Helper()
+ conf = makeTestConfig(conf, side)
+ e, err := Listen("udp", "127.0.0.1:0", conf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(func() {
+ e.Close(canceledContext())
+ })
+ return e
+}
+
+func makeTestConfig(conf *Config, side connSide) *Config {
+ if conf == nil {
+ return nil
+ }
+ newConf := *conf
+ conf = &newConf
+ if conf.TLSConfig == nil {
+ conf.TLSConfig = newTestTLSConfig(side)
+ }
+ if conf.QLogLogger == nil {
+ conf.QLogLogger = slog.New(qlog.NewJSONHandler(qlog.HandlerOptions{
+ Level: QLogLevelFrame,
+ Dir: *qlogdir,
+ }))
+ }
+ return conf
+}
+
+type testEndpoint struct {
+ t *testing.T
+ e *Endpoint
+ now time.Time
+ recvc chan *datagram
+ idlec chan struct{}
+ conns map[*Conn]*testConn
+ acceptQueue []*testConn
+ configTransportParams []func(*transportParameters)
+ configTestConn []func(*testConn)
+ sentDatagrams [][]byte
+ peerTLSConn *tls.QUICConn
+ lastInitialDstConnID []byte // for parsing Retry packets
+}
+
+func newTestEndpoint(t *testing.T, config *Config) *testEndpoint {
+ te := &testEndpoint{
+ t: t,
+ now: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC),
+ recvc: make(chan *datagram),
+ idlec: make(chan struct{}),
+ conns: make(map[*Conn]*testConn),
+ }
+ var err error
+ te.e, err = newEndpoint((*testEndpointUDPConn)(te), config, (*testEndpointHooks)(te))
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(te.cleanup)
+ return te
+}
+
+func (te *testEndpoint) cleanup() {
+ te.e.Close(canceledContext())
+}
+
+func (te *testEndpoint) wait() {
+ select {
+ case te.idlec <- struct{}{}:
+ case <-te.e.closec:
+ }
+ for _, tc := range te.conns {
+ tc.wait()
+ }
+}
+
+// accept returns a server connection from the endpoint.
+// Unlike Endpoint.Accept, connections are available as soon as they are created.
+func (te *testEndpoint) accept() *testConn {
+ if len(te.acceptQueue) == 0 {
+ te.t.Fatalf("accept: expected available conn, but found none")
+ }
+ tc := te.acceptQueue[0]
+ te.acceptQueue = te.acceptQueue[1:]
+ return tc
+}
+
+func (te *testEndpoint) write(d *datagram) {
+ te.recvc <- d
+ te.wait()
+}
+
+var testClientAddr = netip.MustParseAddrPort("10.0.0.1:8000")
+
+func (te *testEndpoint) writeDatagram(d *testDatagram) {
+ te.t.Helper()
+ logDatagram(te.t, "<- endpoint under test receives", d)
+ var buf []byte
+ for _, p := range d.packets {
+ tc := te.connForDestination(p.dstConnID)
+ if p.ptype != packetTypeRetry && tc != nil {
+ space := spaceForPacketType(p.ptype)
+ if p.num >= tc.peerNextPacketNum[space] {
+ tc.peerNextPacketNum[space] = p.num + 1
+ }
+ }
+ if p.ptype == packetTypeInitial {
+ te.lastInitialDstConnID = p.dstConnID
+ }
+ pad := 0
+ if p.ptype == packetType1RTT {
+ pad = d.paddedSize - len(buf)
+ }
+ buf = append(buf, encodeTestPacket(te.t, tc, p, pad)...)
+ }
+ for len(buf) < d.paddedSize {
+ buf = append(buf, 0)
+ }
+ te.write(&datagram{
+ b: buf,
+ peerAddr: d.addr,
+ })
+}
+
+func (te *testEndpoint) connForDestination(dstConnID []byte) *testConn {
+ for _, tc := range te.conns {
+ for _, loc := range tc.conn.connIDState.local {
+ if bytes.Equal(loc.cid, dstConnID) {
+ return tc
+ }
+ }
+ }
+ return nil
+}
+
+func (te *testEndpoint) connForSource(srcConnID []byte) *testConn {
+ for _, tc := range te.conns {
+ for _, loc := range tc.conn.connIDState.remote {
+ if bytes.Equal(loc.cid, srcConnID) {
+ return tc
+ }
+ }
+ }
+ return nil
+}
+
+func (te *testEndpoint) read() []byte {
+ te.t.Helper()
+ te.wait()
+ if len(te.sentDatagrams) == 0 {
+ return nil
+ }
+ d := te.sentDatagrams[0]
+ te.sentDatagrams = te.sentDatagrams[1:]
+ return d
+}
+
+func (te *testEndpoint) readDatagram() *testDatagram {
+ te.t.Helper()
+ buf := te.read()
+ if buf == nil {
+ return nil
+ }
+ p, _ := parseGenericLongHeaderPacket(buf)
+ tc := te.connForSource(p.dstConnID)
+ d := parseTestDatagram(te.t, te, tc, buf)
+ logDatagram(te.t, "-> endpoint under test sends", d)
+ return d
+}
+
+// wantDatagram indicates that we expect the Endpoint to send a datagram.
+func (te *testEndpoint) wantDatagram(expectation string, want *testDatagram) {
+ te.t.Helper()
+ got := te.readDatagram()
+ if !datagramEqual(got, want) {
+ te.t.Fatalf("%v:\ngot datagram: %v\nwant datagram: %v", expectation, got, want)
+ }
+}
+
+// wantIdle indicates that we expect the Endpoint to not send any more datagrams.
+func (te *testEndpoint) wantIdle(expectation string) {
+ if got := te.readDatagram(); got != nil {
+ te.t.Fatalf("expect: %v\nunexpectedly got: %v", expectation, got)
+ }
+}
+
+// advance causes time to pass.
+func (te *testEndpoint) advance(d time.Duration) {
+ te.t.Helper()
+ te.advanceTo(te.now.Add(d))
+}
+
+// advanceTo sets the current time.
+func (te *testEndpoint) advanceTo(now time.Time) {
+ te.t.Helper()
+ if te.now.After(now) {
+ te.t.Fatalf("time moved backwards: %v -> %v", te.now, now)
+ }
+ te.now = now
+ for _, tc := range te.conns {
+ if !tc.timer.After(te.now) {
+ tc.conn.sendMsg(timerEvent{})
+ tc.wait()
+ }
+ }
+}
+
+// testEndpointHooks implements endpointTestHooks.
+type testEndpointHooks testEndpoint
+
+func (te *testEndpointHooks) timeNow() time.Time {
+ return te.now
+}
+
+func (te *testEndpointHooks) newConn(c *Conn) {
+ tc := newTestConnForConn(te.t, (*testEndpoint)(te), c)
+ te.conns[c] = tc
+}
+
+// testEndpointUDPConn implements UDPConn.
+type testEndpointUDPConn testEndpoint
+
+func (te *testEndpointUDPConn) Close() error {
+ close(te.recvc)
+ return nil
+}
+
+func (te *testEndpointUDPConn) LocalAddr() netip.AddrPort {
+ return netip.MustParseAddrPort("127.0.0.1:443")
+}
+
+func (te *testEndpointUDPConn) Read(f func(*datagram)) {
+ for {
+ select {
+ case d, ok := <-te.recvc:
+ if !ok {
+ return
+ }
+ f(d)
+ case <-te.idlec:
+ }
+ }
+}
+
+func (te *testEndpointUDPConn) Write(dgram datagram) error {
+ te.sentDatagrams = append(te.sentDatagrams, append([]byte(nil), dgram.b...))
+ return nil
+}
diff --git a/internal/quic/errors.go b/quic/errors.go
similarity index 80%
rename from internal/quic/errors.go
rename to quic/errors.go
index a9ebbe4b7..954793cfc 100644
--- a/internal/quic/errors.go
+++ b/quic/errors.go
@@ -10,7 +10,7 @@ import (
"fmt"
)
-// A transportError is an transport error code from RFC 9000 Section 20.1.
+// A transportError is a transport error code from RFC 9000 Section 20.1.
//
// The transportError type doesn't implement the error interface to ensure we always
// distinguish between errors sent to and received from the peer.
@@ -83,10 +83,16 @@ func (e transportError) String() string {
}
// A localTransportError is an error sent to the peer.
-type localTransportError transportError
+type localTransportError struct {
+ code transportError
+ reason string
+}
func (e localTransportError) Error() string {
- return "closed connection: " + transportError(e).String()
+ if e.reason == "" {
+ return fmt.Sprintf("closed connection: %v", e.code)
+ }
+ return fmt.Sprintf("closed connection: %v: %q", e.code, e.reason)
}
// A peerTransportError is an error received from the peer.
@@ -99,6 +105,14 @@ func (e peerTransportError) Error() string {
return fmt.Sprintf("peer closed connection: %v: %q", e.code, e.reason)
}
+// A StreamErrorCode is an application protocol error code (RFC 9000, Section 20.2)
+// indicating why a stream is being closed.
+type StreamErrorCode uint64
+
+func (e StreamErrorCode) Error() string {
+ return fmt.Sprintf("stream error code %v", uint64(e))
+}
+
// An ApplicationError is an application protocol error code (RFC 9000, Section 20.2).
// Application protocol errors may be sent when terminating a stream or connection.
type ApplicationError struct {
@@ -106,7 +120,13 @@ type ApplicationError struct {
Reason string
}
-func (e ApplicationError) Error() string {
+func (e *ApplicationError) Error() string {
// TODO: Include the Reason string here, but sanitize it first.
return fmt.Sprintf("AppError %v", e.Code)
}
+
+// Is reports a match if err is an *ApplicationError with a matching Code.
+func (e *ApplicationError) Is(err error) bool {
+ e2, ok := err.(*ApplicationError)
+ return ok && e2.Code == e.Code
+}
diff --git a/internal/quic/files_test.go b/quic/files_test.go
similarity index 100%
rename from internal/quic/files_test.go
rename to quic/files_test.go
diff --git a/internal/quic/frame_debug.go b/quic/frame_debug.go
similarity index 67%
rename from internal/quic/frame_debug.go
rename to quic/frame_debug.go
index 945bb9d1f..17234dd7c 100644
--- a/internal/quic/frame_debug.go
+++ b/quic/frame_debug.go
@@ -8,6 +8,9 @@ package quic
import (
"fmt"
+ "log/slog"
+ "strconv"
+ "time"
)
// A debugFrame is a representation of the contents of a QUIC frame,
@@ -15,6 +18,7 @@ import (
type debugFrame interface {
String() string
write(w *packetWriter) bool
+ LogValue() slog.Value
}
func parseDebugFrame(b []byte) (f debugFrame, n int) {
@@ -73,6 +77,7 @@ func parseDebugFrame(b []byte) (f debugFrame, n int) {
// debugFramePadding is a sequence of PADDING frames.
type debugFramePadding struct {
size int
+ to int // alternate for writing packets: pad to
}
func parseDebugFramePadding(b []byte) (f debugFramePadding, n int) {
@@ -91,12 +96,23 @@ func (f debugFramePadding) write(w *packetWriter) bool {
if w.avail() == 0 {
return false
}
+ if f.to > 0 {
+ w.appendPaddingTo(f.to)
+ return true
+ }
for i := 0; i < f.size && w.avail() > 0; i++ {
w.b = append(w.b, frameTypePadding)
}
return true
}
+func (f debugFramePadding) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "padding"),
+ slog.Int("length", f.size),
+ )
+}
+
// debugFramePing is a PING frame.
type debugFramePing struct{}
@@ -112,6 +128,12 @@ func (f debugFramePing) write(w *packetWriter) bool {
return w.appendPingFrame()
}
+func (f debugFramePing) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "ping"),
+ )
+}
+
// debugFrameAck is an ACK frame.
type debugFrameAck struct {
ackDelay unscaledAckDelay
@@ -120,13 +142,13 @@ type debugFrameAck struct {
func parseDebugFrameAck(b []byte) (f debugFrameAck, n int) {
f.ranges = nil
- _, f.ackDelay, n = consumeAckFrame(b, func(start, end packetNumber) {
+ _, f.ackDelay, n = consumeAckFrame(b, func(_ int, start, end packetNumber) {
f.ranges = append(f.ranges, i64range[packetNumber]{
start: start,
end: end,
})
})
- // Ranges are parsed smallest to highest; reverse ranges slice to order them high to low.
+ // Ranges are parsed high to low; reverse ranges slice to order them low to high.
for i := 0; i < len(f.ranges)/2; i++ {
j := len(f.ranges) - 1
f.ranges[i], f.ranges[j] = f.ranges[j], f.ranges[i]
@@ -146,6 +168,61 @@ func (f debugFrameAck) write(w *packetWriter) bool {
return w.appendAckFrame(rangeset[packetNumber](f.ranges), f.ackDelay)
}
+func (f debugFrameAck) LogValue() slog.Value {
+ return slog.StringValue("error: debugFrameAck should not appear as a slog Value")
+}
+
+// debugFrameScaledAck is an ACK frame with scaled ACK Delay.
+//
+// This type is used in qlog events, which need access to the delay as a duration.
+type debugFrameScaledAck struct {
+ ackDelay time.Duration
+ ranges []i64range[packetNumber]
+}
+
+func (f debugFrameScaledAck) LogValue() slog.Value {
+ var ackDelay slog.Attr
+ if f.ackDelay >= 0 {
+ ackDelay = slog.Duration("ack_delay", f.ackDelay)
+ }
+ return slog.GroupValue(
+ slog.String("frame_type", "ack"),
+ // Rather than trying to convert the ack ranges into the slog data model,
+ // pass a value that can JSON-encode itself.
+ slog.Any("acked_ranges", debugAckRanges(f.ranges)),
+ ackDelay,
+ )
+}
+
+type debugAckRanges []i64range[packetNumber]
+
+// AppendJSON appends a JSON encoding of the ack ranges to b, and returns it.
+// This is different than the standard json.Marshaler, but more efficient.
+// Since we only use this in cooperation with the qlog package,
+// encoding/json compatibility is irrelevant.
+func (r debugAckRanges) AppendJSON(b []byte) []byte {
+ b = append(b, '[')
+ for i, ar := range r {
+ start, end := ar.start, ar.end-1 // qlog ranges are closed-closed
+ if i != 0 {
+ b = append(b, ',')
+ }
+ b = append(b, '[')
+ b = strconv.AppendInt(b, int64(start), 10)
+ if start != end {
+ b = append(b, ',')
+ b = strconv.AppendInt(b, int64(end), 10)
+ }
+ b = append(b, ']')
+ }
+ b = append(b, ']')
+ return b
+}
+
+func (r debugAckRanges) String() string {
+ return string(r.AppendJSON(nil))
+}
+
// debugFrameResetStream is a RESET_STREAM frame.
type debugFrameResetStream struct {
id streamID
@@ -166,6 +243,14 @@ func (f debugFrameResetStream) write(w *packetWriter) bool {
return w.appendResetStreamFrame(f.id, f.code, f.finalSize)
}
+func (f debugFrameResetStream) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "reset_stream"),
+ slog.Uint64("stream_id", uint64(f.id)),
+ slog.Uint64("final_size", uint64(f.finalSize)),
+ )
+}
+
// debugFrameStopSending is a STOP_SENDING frame.
type debugFrameStopSending struct {
id streamID
@@ -185,6 +270,14 @@ func (f debugFrameStopSending) write(w *packetWriter) bool {
return w.appendStopSendingFrame(f.id, f.code)
}
+func (f debugFrameStopSending) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "stop_sending"),
+ slog.Uint64("stream_id", uint64(f.id)),
+ slog.Uint64("error_code", uint64(f.code)),
+ )
+}
+
// debugFrameCrypto is a CRYPTO frame.
type debugFrameCrypto struct {
off int64
@@ -206,6 +299,14 @@ func (f debugFrameCrypto) write(w *packetWriter) bool {
return added
}
+func (f debugFrameCrypto) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "crypto"),
+ slog.Int64("offset", f.off),
+ slog.Int("length", len(f.data)),
+ )
+}
+
// debugFrameNewToken is a NEW_TOKEN frame.
type debugFrameNewToken struct {
token []byte
@@ -224,6 +325,13 @@ func (f debugFrameNewToken) write(w *packetWriter) bool {
return w.appendNewTokenFrame(f.token)
}
+func (f debugFrameNewToken) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "new_token"),
+ slogHexstring("token", f.token),
+ )
+}
+
// debugFrameStream is a STREAM frame.
type debugFrameStream struct {
id streamID
@@ -251,6 +359,20 @@ func (f debugFrameStream) write(w *packetWriter) bool {
return added
}
+func (f debugFrameStream) LogValue() slog.Value {
+ var fin slog.Attr
+ if f.fin {
+ fin = slog.Bool("fin", true)
+ }
+ return slog.GroupValue(
+ slog.String("frame_type", "stream"),
+ slog.Uint64("stream_id", uint64(f.id)),
+ slog.Int64("offset", f.off),
+ slog.Int("length", len(f.data)),
+ fin,
+ )
+}
+
// debugFrameMaxData is a MAX_DATA frame.
type debugFrameMaxData struct {
max int64
@@ -269,6 +391,13 @@ func (f debugFrameMaxData) write(w *packetWriter) bool {
return w.appendMaxDataFrame(f.max)
}
+func (f debugFrameMaxData) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "max_data"),
+ slog.Int64("maximum", f.max),
+ )
+}
+
// debugFrameMaxStreamData is a MAX_STREAM_DATA frame.
type debugFrameMaxStreamData struct {
id streamID
@@ -288,6 +417,14 @@ func (f debugFrameMaxStreamData) write(w *packetWriter) bool {
return w.appendMaxStreamDataFrame(f.id, f.max)
}
+func (f debugFrameMaxStreamData) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "max_stream_data"),
+ slog.Uint64("stream_id", uint64(f.id)),
+ slog.Int64("maximum", f.max),
+ )
+}
+
// debugFrameMaxStreams is a MAX_STREAMS frame.
type debugFrameMaxStreams struct {
streamType streamType
@@ -307,6 +444,14 @@ func (f debugFrameMaxStreams) write(w *packetWriter) bool {
return w.appendMaxStreamsFrame(f.streamType, f.max)
}
+func (f debugFrameMaxStreams) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "max_streams"),
+ slog.String("stream_type", f.streamType.qlogString()),
+ slog.Int64("maximum", f.max),
+ )
+}
+
// debugFrameDataBlocked is a DATA_BLOCKED frame.
type debugFrameDataBlocked struct {
max int64
@@ -325,6 +470,13 @@ func (f debugFrameDataBlocked) write(w *packetWriter) bool {
return w.appendDataBlockedFrame(f.max)
}
+func (f debugFrameDataBlocked) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "data_blocked"),
+ slog.Int64("limit", f.max),
+ )
+}
+
// debugFrameStreamDataBlocked is a STREAM_DATA_BLOCKED frame.
type debugFrameStreamDataBlocked struct {
id streamID
@@ -344,6 +496,14 @@ func (f debugFrameStreamDataBlocked) write(w *packetWriter) bool {
return w.appendStreamDataBlockedFrame(f.id, f.max)
}
+func (f debugFrameStreamDataBlocked) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "stream_data_blocked"),
+ slog.Uint64("stream_id", uint64(f.id)),
+ slog.Int64("limit", f.max),
+ )
+}
+
// debugFrameStreamsBlocked is a STREAMS_BLOCKED frame.
type debugFrameStreamsBlocked struct {
streamType streamType
@@ -363,12 +523,20 @@ func (f debugFrameStreamsBlocked) write(w *packetWriter) bool {
return w.appendStreamsBlockedFrame(f.streamType, f.max)
}
+func (f debugFrameStreamsBlocked) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "streams_blocked"),
+ slog.String("stream_type", f.streamType.qlogString()),
+ slog.Int64("limit", f.max),
+ )
+}
+
// debugFrameNewConnectionID is a NEW_CONNECTION_ID frame.
type debugFrameNewConnectionID struct {
seq int64
retirePriorTo int64
connID []byte
- token [16]byte
+ token statelessResetToken
}
func parseDebugFrameNewConnectionID(b []byte) (f debugFrameNewConnectionID, n int) {
@@ -384,12 +552,19 @@ func (f debugFrameNewConnectionID) write(w *packetWriter) bool {
return w.appendNewConnectionIDFrame(f.seq, f.retirePriorTo, f.connID, f.token)
}
+func (f debugFrameNewConnectionID) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "new_connection_id"),
+ slog.Int64("sequence_number", f.seq),
+ slog.Int64("retire_prior_to", f.retirePriorTo),
+ slogHexstring("connection_id", f.connID),
+ slogHexstring("stateless_reset_token", f.token[:]),
+ )
+}
+
// debugFrameRetireConnectionID is a NEW_CONNECTION_ID frame.
type debugFrameRetireConnectionID struct {
- seq uint64
- retirePriorTo uint64
- connID []byte
- token [16]byte
+ seq int64
}
func parseDebugFrameRetireConnectionID(b []byte) (f debugFrameRetireConnectionID, n int) {
@@ -405,9 +580,16 @@ func (f debugFrameRetireConnectionID) write(w *packetWriter) bool {
return w.appendRetireConnectionIDFrame(f.seq)
}
+func (f debugFrameRetireConnectionID) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "retire_connection_id"),
+ slog.Int64("sequence_number", f.seq),
+ )
+}
+
// debugFramePathChallenge is a PATH_CHALLENGE frame.
type debugFramePathChallenge struct {
- data uint64
+ data pathChallengeData
}
func parseDebugFramePathChallenge(b []byte) (f debugFramePathChallenge, n int) {
@@ -416,16 +598,23 @@ func parseDebugFramePathChallenge(b []byte) (f debugFramePathChallenge, n int) {
}
func (f debugFramePathChallenge) String() string {
- return fmt.Sprintf("PATH_CHALLENGE Data=%016x", f.data)
+ return fmt.Sprintf("PATH_CHALLENGE Data=%x", f.data)
}
func (f debugFramePathChallenge) write(w *packetWriter) bool {
return w.appendPathChallengeFrame(f.data)
}
+func (f debugFramePathChallenge) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "path_challenge"),
+ slog.String("data", fmt.Sprintf("%x", f.data)),
+ )
+}
+
// debugFramePathResponse is a PATH_RESPONSE frame.
type debugFramePathResponse struct {
- data uint64
+ data pathChallengeData
}
func parseDebugFramePathResponse(b []byte) (f debugFramePathResponse, n int) {
@@ -434,13 +623,20 @@ func parseDebugFramePathResponse(b []byte) (f debugFramePathResponse, n int) {
}
func (f debugFramePathResponse) String() string {
- return fmt.Sprintf("PATH_RESPONSE Data=%016x", f.data)
+ return fmt.Sprintf("PATH_RESPONSE Data=%x", f.data)
}
func (f debugFramePathResponse) write(w *packetWriter) bool {
return w.appendPathResponseFrame(f.data)
}
+func (f debugFramePathResponse) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "path_response"),
+ slog.String("data", fmt.Sprintf("%x", f.data)),
+ )
+}
+
// debugFrameConnectionCloseTransport is a CONNECTION_CLOSE frame carrying a transport error.
type debugFrameConnectionCloseTransport struct {
code transportError
@@ -468,6 +664,15 @@ func (f debugFrameConnectionCloseTransport) write(w *packetWriter) bool {
return w.appendConnectionCloseTransportFrame(f.code, f.frameType, f.reason)
}
+func (f debugFrameConnectionCloseTransport) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "connection_close"),
+ slog.String("error_space", "transport"),
+ slog.Uint64("error_code_value", uint64(f.code)),
+ slog.String("reason", f.reason),
+ )
+}
+
// debugFrameConnectionCloseApplication is a CONNECTION_CLOSE frame carrying an application error.
type debugFrameConnectionCloseApplication struct {
code uint64
@@ -491,6 +696,15 @@ func (f debugFrameConnectionCloseApplication) write(w *packetWriter) bool {
return w.appendConnectionCloseApplicationFrame(f.code, f.reason)
}
+func (f debugFrameConnectionCloseApplication) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "connection_close"),
+ slog.String("error_space", "application"),
+ slog.Uint64("error_code_value", uint64(f.code)),
+ slog.String("reason", f.reason),
+ )
+}
+
// debugFrameHandshakeDone is a HANDSHAKE_DONE frame.
type debugFrameHandshakeDone struct{}
@@ -505,3 +719,9 @@ func (f debugFrameHandshakeDone) String() string {
func (f debugFrameHandshakeDone) write(w *packetWriter) bool {
return w.appendHandshakeDoneFrame()
}
+
+func (f debugFrameHandshakeDone) LogValue() slog.Value {
+ return slog.GroupValue(
+ slog.String("frame_type", "handshake_done"),
+ )
+}
diff --git a/quic/gate.go b/quic/gate.go
new file mode 100644
index 000000000..a2fb53711
--- /dev/null
+++ b/quic/gate.go
@@ -0,0 +1,91 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import "context"
+
+// A gate is a monitor (mutex + condition variable) with one bit of state.
+//
+// The condition may be either set or unset.
+// Lock operations may be unconditional, or wait for the condition to be set.
+// Unlock operations record the new state of the condition.
+type gate struct {
+ // When unlocked, exactly one of set or unset contains a value.
+ // When locked, neither chan contains a value.
+ set chan struct{}
+ unset chan struct{}
+}
+
+// newGate returns a new, unlocked gate with the condition unset.
+func newGate() gate {
+ g := newLockedGate()
+ g.unlock(false)
+ return g
+}
+
+// newLockedGate returns a new, locked gate.
+func newLockedGate() gate {
+ return gate{
+ set: make(chan struct{}, 1),
+ unset: make(chan struct{}, 1),
+ }
+}
+
+// lock acquires the gate unconditionally.
+// It reports whether the condition is set.
+func (g *gate) lock() (set bool) {
+ select {
+ case <-g.set:
+ return true
+ case <-g.unset:
+ return false
+ }
+}
+
+// waitAndLock waits until the condition is set before acquiring the gate.
+// If the context expires, waitAndLock returns an error and does not acquire the gate.
+func (g *gate) waitAndLock(ctx context.Context, testHooks connTestHooks) error {
+ if testHooks != nil {
+ return testHooks.waitUntil(ctx, g.lockIfSet)
+ }
+ select {
+ case <-g.set:
+ return nil
+ default:
+ }
+ select {
+ case <-g.set:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// lockIfSet acquires the gate if and only if the condition is set.
+func (g *gate) lockIfSet() (acquired bool) {
+ select {
+ case <-g.set:
+ return true
+ default:
+ return false
+ }
+}
+
+// unlock sets the condition and releases the gate.
+func (g *gate) unlock(set bool) {
+ if set {
+ g.set <- struct{}{}
+ } else {
+ g.unset <- struct{}{}
+ }
+}
+
+// unlockFunc sets the condition to the result of f and releases the gate.
+// Useful in defers.
+func (g *gate) unlockFunc(f func() bool) {
+ g.unlock(f())
+}
diff --git a/quic/gate_test.go b/quic/gate_test.go
new file mode 100644
index 000000000..9e84a84bd
--- /dev/null
+++ b/quic/gate_test.go
@@ -0,0 +1,95 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "testing"
+ "time"
+)
+
+func TestGateLockAndUnlock(t *testing.T) {
+ g := newGate()
+ if set := g.lock(); set {
+ t.Errorf("g.lock() of never-locked gate: true, want false")
+ }
+ unlockedc := make(chan struct{})
+ donec := make(chan struct{})
+ go func() {
+ defer close(donec)
+ set := g.lock()
+ select {
+ case <-unlockedc:
+ default:
+ t.Errorf("g.lock() succeeded while gate was held")
+ }
+ if !set {
+ t.Errorf("g.lock() of set gate: false, want true")
+ }
+ g.unlock(false)
+ }()
+ time.Sleep(1 * time.Millisecond)
+ close(unlockedc)
+ g.unlock(true)
+ <-donec
+ if set := g.lock(); set {
+ t.Errorf("g.lock() of unset gate: true, want false")
+ }
+}
+
+func TestGateWaitAndLockContext(t *testing.T) {
+ g := newGate()
+ // waitAndLock is canceled
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ time.Sleep(1 * time.Millisecond)
+ cancel()
+ }()
+ if err := g.waitAndLock(ctx, nil); err != context.Canceled {
+ t.Errorf("g.waitAndLock() = %v, want context.Canceled", err)
+ }
+ // waitAndLock succeeds
+ set := false
+ go func() {
+ time.Sleep(1 * time.Millisecond)
+ g.lock()
+ set = true
+ g.unlock(true)
+ }()
+ if err := g.waitAndLock(context.Background(), nil); err != nil {
+ t.Errorf("g.waitAndLock() = %v, want nil", err)
+ }
+ if !set {
+ t.Errorf("g.waitAndLock() returned before gate was set")
+ }
+ g.unlock(true)
+ // waitAndLock succeeds when the gate is set and the context is canceled
+ if err := g.waitAndLock(ctx, nil); err != nil {
+ t.Errorf("g.waitAndLock() = %v, want nil", err)
+ }
+}
+
+func TestGateLockIfSet(t *testing.T) {
+ g := newGate()
+ if locked := g.lockIfSet(); locked {
+ t.Errorf("g.lockIfSet() of unset gate = %v, want false", locked)
+ }
+ g.lock()
+ g.unlock(true)
+ if locked := g.lockIfSet(); !locked {
+ t.Errorf("g.lockIfSet() of set gate = %v, want true", locked)
+ }
+}
+
+func TestGateUnlockFunc(t *testing.T) {
+ g := newGate()
+ go func() {
+ g.lock()
+ defer g.unlockFunc(func() bool { return true })
+ }()
+ g.waitAndLock(context.Background(), nil)
+}
diff --git a/quic/gotraceback_test.go b/quic/gotraceback_test.go
new file mode 100644
index 000000000..c22702faa
--- /dev/null
+++ b/quic/gotraceback_test.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21 && unix
+
+package quic
+
+import (
+ "os"
+ "os/signal"
+ "runtime/debug"
+ "syscall"
+)
+
+// When killed with SIGQUIT (C-\), print stacks with GOTRACEBACK=all rather than system,
+// to reduce irrelevant noise when debugging hung tests.
+func init() {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGQUIT)
+ go func() {
+ <-ch
+ debug.SetTraceback("all")
+ panic("SIGQUIT")
+ }()
+}
diff --git a/quic/idle.go b/quic/idle.go
new file mode 100644
index 000000000..f5b2422ad
--- /dev/null
+++ b/quic/idle.go
@@ -0,0 +1,170 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "time"
+)
+
+// idleState tracks connection idle events.
+//
+// Before the handshake is confirmed, the idle timeout is Config.HandshakeTimeout.
+//
+// After the handshake is confirmed, the idle timeout is
+// the minimum of Config.MaxIdleTimeout and the peer's max_idle_timeout transport parameter.
+//
+// If KeepAlivePeriod is set, keep-alive pings are sent.
+// Keep-alives are only sent after the handshake is confirmed.
+//
+// https://www.rfc-editor.org/rfc/rfc9000#section-10.1
+type idleState struct {
+ // idleDuration is the negotiated idle timeout for the connection.
+ idleDuration time.Duration
+
+ // idleTimeout is the time at which the connection will be closed due to inactivity.
+ idleTimeout time.Time
+
+ // nextTimeout is the time of the next idle event.
+ // If nextTimeout == idleTimeout, this is the idle timeout.
+ // Otherwise, this is the keep-alive timeout.
+ nextTimeout time.Time
+
+ // sentSinceLastReceive is set if we have sent an ack-eliciting packet
+ // since the last time we received and processed a packet from the peer.
+ sentSinceLastReceive bool
+}
+
+// receivePeerMaxIdleTimeout handles the peer's max_idle_timeout transport parameter.
+func (c *Conn) receivePeerMaxIdleTimeout(peerMaxIdleTimeout time.Duration) {
+ localMaxIdleTimeout := c.config.maxIdleTimeout()
+ switch {
+ case localMaxIdleTimeout == 0:
+ c.idle.idleDuration = peerMaxIdleTimeout
+ case peerMaxIdleTimeout == 0:
+ c.idle.idleDuration = localMaxIdleTimeout
+ default:
+ c.idle.idleDuration = min(localMaxIdleTimeout, peerMaxIdleTimeout)
+ }
+}
+
+func (c *Conn) idleHandlePacketReceived(now time.Time) {
+ if !c.handshakeConfirmed.isSet() {
+ return
+ }
+ // "An endpoint restarts its idle timer when a packet from its peer is
+ // received and processed successfully."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-10.1-3
+ c.idle.sentSinceLastReceive = false
+ c.restartIdleTimer(now)
+}
+
+func (c *Conn) idleHandlePacketSent(now time.Time, sent *sentPacket) {
+ // "An endpoint also restarts its idle timer when sending an ack-eliciting packet
+ // if no other ack-eliciting packets have been sent since
+ // last receiving and processing a packet."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-10.1-3
+ if c.idle.sentSinceLastReceive || !sent.ackEliciting || !c.handshakeConfirmed.isSet() {
+ return
+ }
+ c.idle.sentSinceLastReceive = true
+ c.restartIdleTimer(now)
+}
+
+func (c *Conn) restartIdleTimer(now time.Time) {
+ if !c.isAlive() {
+ // Connection is closing, disable timeouts.
+ c.idle.idleTimeout = time.Time{}
+ c.idle.nextTimeout = time.Time{}
+ return
+ }
+ var idleDuration time.Duration
+ if c.handshakeConfirmed.isSet() {
+ idleDuration = c.idle.idleDuration
+ } else {
+ idleDuration = c.config.handshakeTimeout()
+ }
+ if idleDuration == 0 {
+ c.idle.idleTimeout = time.Time{}
+ } else {
+ // "[...] endpoints MUST increase the idle timeout period to be
+ // at least three times the current Probe Timeout (PTO)."
+ // https://www.rfc-editor.org/rfc/rfc9000#section-10.1-4
+ idleDuration = max(idleDuration, 3*c.loss.ptoPeriod())
+ c.idle.idleTimeout = now.Add(idleDuration)
+ }
+ // Set the time of our next event:
+ // The idle timer if no keep-alive is set, or the keep-alive timer if one is.
+ c.idle.nextTimeout = c.idle.idleTimeout
+ keepAlive := c.config.keepAlivePeriod()
+ switch {
+ case !c.handshakeConfirmed.isSet():
+ // We do not send keep-alives before the handshake is complete.
+ case keepAlive <= 0:
+ // Keep-alives are not enabled.
+ case c.idle.sentSinceLastReceive:
+ // We have sent an ack-eliciting packet to the peer.
+ // If they don't acknowledge it, loss detection will follow up with PTO probes,
+ // which will function as keep-alives.
+ // We don't need to send further pings.
+ case idleDuration == 0:
+ // The connection does not have a negotiated idle timeout.
+ // Send keep-alives anyway, since they may be required to keep middleboxes
+ // from losing state.
+ c.idle.nextTimeout = now.Add(keepAlive)
+ default:
+ // Schedule our next keep-alive.
+ // If our configured keep-alive period is greater than half the negotiated
+ // connection idle timeout, we reduce the keep-alive period to half
+ // the idle timeout to ensure we have time for the ping to arrive.
+ c.idle.nextTimeout = now.Add(min(keepAlive, idleDuration/2))
+ }
+}
+
+func (c *Conn) appendKeepAlive(now time.Time) bool {
+ if c.idle.nextTimeout.IsZero() || c.idle.nextTimeout.After(now) {
+ return true // timer has not expired
+ }
+ if c.idle.nextTimeout.Equal(c.idle.idleTimeout) {
+ return true // no keepalive timer set, only idle
+ }
+ if c.idle.sentSinceLastReceive {
+ return true // already sent an ack-eliciting packet
+ }
+ if c.w.sent.ackEliciting {
+ return true // this packet is already ack-eliciting
+ }
+ // Send an ack-eliciting PING frame to the peer to keep the connection alive.
+ return c.w.appendPingFrame()
+}
+
+var errHandshakeTimeout error = localTransportError{
+ code: errConnectionRefused,
+ reason: "handshake timeout",
+}
+
+func (c *Conn) idleAdvance(now time.Time) (shouldExit bool) {
+ if c.idle.idleTimeout.IsZero() || now.Before(c.idle.idleTimeout) {
+ return false
+ }
+ c.idle.idleTimeout = time.Time{}
+ c.idle.nextTimeout = time.Time{}
+ if !c.handshakeConfirmed.isSet() {
+ // Handshake timeout has expired.
+ // If we're a server, we're refusing the too-slow client.
+ // If we're a client, we're giving up.
+ // In either case, we're going to send a CONNECTION_CLOSE frame and
+ // enter the closing state rather than unceremoniously dropping the connection,
+ // since the peer might still be trying to complete the handshake.
+ c.abort(now, errHandshakeTimeout)
+ return false
+ }
+ // Idle timeout has expired.
+ //
+ // "[...] the connection is silently closed and its state is discarded [...]"
+ // https://www.rfc-editor.org/rfc/rfc9000#section-10.1-1
+ return true
+}
diff --git a/quic/idle_test.go b/quic/idle_test.go
new file mode 100644
index 000000000..18f6a690a
--- /dev/null
+++ b/quic/idle_test.go
@@ -0,0 +1,225 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "testing"
+ "time"
+)
+
+func TestHandshakeTimeoutExpiresServer(t *testing.T) {
+ const timeout = 5 * time.Second
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.HandshakeTimeout = timeout
+ })
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeNewConnectionID)
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ // Server starts its end of the handshake.
+ // Client acks these packets to avoid starting the PTO timer.
+ tc.wantFrameType("server sends Initial CRYPTO flight",
+ packetTypeInitial, debugFrameCrypto{})
+ tc.writeAckForAll()
+ tc.wantFrameType("server sends Handshake CRYPTO flight",
+ packetTypeHandshake, debugFrameCrypto{})
+ tc.writeAckForAll()
+
+ if got, want := tc.timerDelay(), timeout; got != want {
+ t.Errorf("connection timer = %v, want %v (handshake timeout)", got, want)
+ }
+
+ // Client sends a packet, but this does not extend the handshake timer.
+ tc.advance(1 * time.Second)
+ tc.writeFrames(packetTypeHandshake, debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake][:1], // partial data
+ })
+ tc.wantIdle("handshake is not complete")
+
+ tc.advance(timeout - 1*time.Second)
+ tc.wantFrame("server closes connection after handshake timeout",
+ packetTypeHandshake, debugFrameConnectionCloseTransport{
+ code: errConnectionRefused,
+ })
+}
+
+func TestHandshakeTimeoutExpiresClient(t *testing.T) {
+ const timeout = 5 * time.Second
+ tc := newTestConn(t, clientSide, func(c *Config) {
+ c.HandshakeTimeout = timeout
+ })
+ tc.ignoreFrame(frameTypeAck)
+ tc.ignoreFrame(frameTypeNewConnectionID)
+ // Start the handshake.
+ // The client always sets a PTO timer until it gets an ack for a handshake packet
+ // or confirms the handshake, so proceed far enough through the handshake to
+ // let us not worry about PTO.
+ tc.wantFrameType("client sends Initial CRYPTO flight",
+ packetTypeInitial, debugFrameCrypto{})
+ tc.writeAckForAll()
+ tc.writeFrames(packetTypeInitial,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelInitial],
+ })
+ tc.writeFrames(packetTypeHandshake,
+ debugFrameCrypto{
+ data: tc.cryptoDataIn[tls.QUICEncryptionLevelHandshake],
+ })
+ tc.wantFrameType("client sends Handshake CRYPTO flight",
+ packetTypeHandshake, debugFrameCrypto{})
+ tc.writeAckForAll()
+ tc.wantIdle("client is waiting for end of handshake")
+
+ if got, want := tc.timerDelay(), timeout; got != want {
+ t.Errorf("connection timer = %v, want %v (handshake timeout)", got, want)
+ }
+ tc.advance(timeout)
+ tc.wantFrame("client closes connection after handshake timeout",
+ packetTypeHandshake, debugFrameConnectionCloseTransport{
+ code: errConnectionRefused,
+ })
+}
+
+func TestIdleTimeoutExpires(t *testing.T) {
+ for _, test := range []struct {
+ localMaxIdleTimeout time.Duration
+ peerMaxIdleTimeout time.Duration
+ wantTimeout time.Duration
+ }{{
+ localMaxIdleTimeout: 10 * time.Second,
+ peerMaxIdleTimeout: 20 * time.Second,
+ wantTimeout: 10 * time.Second,
+ }, {
+ localMaxIdleTimeout: 20 * time.Second,
+ peerMaxIdleTimeout: 10 * time.Second,
+ wantTimeout: 10 * time.Second,
+ }, {
+ localMaxIdleTimeout: 0,
+ peerMaxIdleTimeout: 10 * time.Second,
+ wantTimeout: 10 * time.Second,
+ }, {
+ localMaxIdleTimeout: 10 * time.Second,
+ peerMaxIdleTimeout: 0,
+ wantTimeout: 10 * time.Second,
+ }} {
+ name := fmt.Sprintf("local=%v/peer=%v", test.localMaxIdleTimeout, test.peerMaxIdleTimeout)
+ t.Run(name, func(t *testing.T) {
+ tc := newTestConn(t, serverSide, func(p *transportParameters) {
+ p.maxIdleTimeout = test.peerMaxIdleTimeout
+ }, func(c *Config) {
+ c.MaxIdleTimeout = test.localMaxIdleTimeout
+ })
+ tc.handshake()
+ if got, want := tc.timeUntilEvent(), test.wantTimeout; got != want {
+ t.Errorf("new conn timeout=%v, want %v (idle timeout)", got, want)
+ }
+ tc.advance(test.wantTimeout - 1)
+ tc.wantIdle("connection is idle and alive prior to timeout")
+ ctx := canceledContext()
+ if err := tc.conn.Wait(ctx); err != context.Canceled {
+ t.Fatalf("conn.Wait() = %v, want Canceled", err)
+ }
+ tc.advance(1)
+ tc.wantIdle("connection exits after timeout")
+ if err := tc.conn.Wait(ctx); err != errIdleTimeout {
+ t.Fatalf("conn.Wait() = %v, want errIdleTimeout", err)
+ }
+ })
+ }
+}
+
+func TestIdleTimeoutKeepAlive(t *testing.T) {
+ for _, test := range []struct {
+ idleTimeout time.Duration
+ keepAlive time.Duration
+ wantTimeout time.Duration
+ }{{
+ idleTimeout: 30 * time.Second,
+ keepAlive: 10 * time.Second,
+ wantTimeout: 10 * time.Second,
+ }, {
+ idleTimeout: 10 * time.Second,
+ keepAlive: 30 * time.Second,
+ wantTimeout: 5 * time.Second,
+ }, {
+ idleTimeout: -1, // disabled
+ keepAlive: 30 * time.Second,
+ wantTimeout: 30 * time.Second,
+ }} {
+ name := fmt.Sprintf("idle_timeout=%v/keepalive=%v", test.idleTimeout, test.keepAlive)
+ t.Run(name, func(t *testing.T) {
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxIdleTimeout = test.idleTimeout
+ c.KeepAlivePeriod = test.keepAlive
+ })
+ tc.handshake()
+ if got, want := tc.timeUntilEvent(), test.wantTimeout; got != want {
+ t.Errorf("new conn timeout=%v, want %v (keepalive timeout)", got, want)
+ }
+ tc.advance(test.wantTimeout - 1)
+ tc.wantIdle("connection is idle prior to timeout")
+ tc.advance(1)
+ tc.wantFrameType("keep-alive ping is sent", packetType1RTT,
+ debugFramePing{})
+ })
+ }
+}
+
+func TestIdleLongTermKeepAliveSent(t *testing.T) {
+ // This test examines a connection sitting idle and sending periodic keep-alive pings.
+ const keepAlivePeriod = 30 * time.Second
+ tc := newTestConn(t, clientSide, func(c *Config) {
+ c.KeepAlivePeriod = keepAlivePeriod
+ c.MaxIdleTimeout = -1
+ })
+ tc.handshake()
+ // The handshake will have completed a little bit after the point at which the
+ // keepalive timer was set. Send two PING frames to the conn, triggering an immediate ack
+ // and resetting the timer.
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+ tc.wantFrameType("conn acks received pings", packetType1RTT, debugFrameAck{})
+ for i := 0; i < 10; i++ {
+ tc.wantIdle("conn has nothing more to send")
+ if got, want := tc.timeUntilEvent(), keepAlivePeriod; got != want {
+ t.Errorf("i=%v conn timeout=%v, want %v (keepalive timeout)", i, got, want)
+ }
+ tc.advance(keepAlivePeriod)
+ tc.wantFrameType("keep-alive ping is sent", packetType1RTT,
+ debugFramePing{})
+ tc.writeAckForAll()
+ }
+}
+
+func TestIdleLongTermKeepAliveReceived(t *testing.T) {
+ // This test examines a connection sitting idle, but receiving periodic peer
+ // traffic to keep the connection alive.
+ const idleTimeout = 30 * time.Second
+ tc := newTestConn(t, serverSide, func(c *Config) {
+ c.MaxIdleTimeout = idleTimeout
+ })
+ tc.handshake()
+ for i := 0; i < 10; i++ {
+ tc.advance(idleTimeout - 1*time.Second)
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+ if got, want := tc.timeUntilEvent(), maxAckDelay-timerGranularity; got != want {
+ t.Errorf("i=%v conn timeout=%v, want %v (max_ack_delay)", i, got, want)
+ }
+ tc.advanceToTimer()
+ tc.wantFrameType("conn acks received ping", packetType1RTT, debugFrameAck{})
+ }
+ // Connection is still alive.
+ ctx := canceledContext()
+ if err := tc.conn.Wait(ctx); err != context.Canceled {
+ t.Fatalf("conn.Wait() = %v, want Canceled", err)
+ }
+}
diff --git a/quic/key_update_test.go b/quic/key_update_test.go
new file mode 100644
index 000000000..4a4d67771
--- /dev/null
+++ b/quic/key_update_test.go
@@ -0,0 +1,234 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "testing"
+)
+
+func TestKeyUpdatePeerUpdates(t *testing.T) {
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ tc.ignoreFrames = nil // ignore nothing
+
+ // Peer initiates a key update.
+ tc.sendKeyNumber = 1
+ tc.sendKeyPhaseBit = true
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+
+ // We update to the new key.
+ tc.advanceToTimer()
+ tc.wantFrameType("conn ACKs last packet",
+ packetType1RTT, debugFrameAck{})
+ tc.wantFrame("first packet after a key update is always ack-eliciting",
+ packetType1RTT, debugFramePing{})
+ if got, want := tc.lastPacket.keyNumber, 1; got != want {
+ t.Errorf("after key rotation, conn sent packet with key %v, want %v", got, want)
+ }
+ if !tc.lastPacket.keyPhaseBit {
+ t.Errorf("after key rotation, conn failed to change Key Phase bit")
+ }
+ tc.wantIdle("conn has nothing to send")
+
+ // Peer's ACK of a packet we sent in the new phase completes the update.
+ tc.writeAckForAll()
+
+ // Peer initiates a second key update.
+ tc.sendKeyNumber = 2
+ tc.sendKeyPhaseBit = false
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+
+ // We update to the new key.
+ tc.advanceToTimer()
+ tc.wantFrameType("conn ACKs last packet",
+ packetType1RTT, debugFrameAck{})
+ tc.wantFrame("first packet after a key update is always ack-eliciting",
+ packetType1RTT, debugFramePing{})
+ if got, want := tc.lastPacket.keyNumber, 2; got != want {
+ t.Errorf("after key rotation, conn sent packet with key %v, want %v", got, want)
+ }
+ if tc.lastPacket.keyPhaseBit {
+ t.Errorf("after second key rotation, conn failed to change Key Phase bit")
+ }
+ tc.wantIdle("conn has nothing to send")
+}
+
+func TestKeyUpdateAcceptPreviousPhaseKeys(t *testing.T) {
+ // "An endpoint SHOULD retain old keys for some time after
+ // unprotecting a packet sent using the new keys."
+ // https://www.rfc-editor.org/rfc/rfc9001#section-6.1-8
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ tc.ignoreFrames = nil // ignore nothing
+
+ // Peer initiates a key update, skipping one packet number.
+ pnum0 := tc.peerNextPacketNum[appDataSpace]
+ tc.peerNextPacketNum[appDataSpace]++
+ tc.sendKeyNumber = 1
+ tc.sendKeyPhaseBit = true
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+
+ // We update to the new key.
+ // This ACK is not delayed, because we've skipped a packet number.
+ tc.wantFrame("conn ACKs last packet",
+ packetType1RTT, debugFrameAck{
+ ranges: []i64range[packetNumber]{
+ {0, pnum0},
+ {pnum0 + 1, pnum0 + 2},
+ },
+ })
+ tc.wantFrame("first packet after a key update is always ack-eliciting",
+ packetType1RTT, debugFramePing{})
+ if got, want := tc.lastPacket.keyNumber, 1; got != want {
+ t.Errorf("after key rotation, conn sent packet with key %v, want %v", got, want)
+ }
+ if !tc.lastPacket.keyPhaseBit {
+ t.Errorf("after key rotation, conn failed to change Key Phase bit")
+ }
+ tc.wantIdle("conn has nothing to send")
+
+ // We receive the previously-skipped packet in the earlier key phase.
+ tc.peerNextPacketNum[appDataSpace] = pnum0
+ tc.sendKeyNumber = 0
+ tc.sendKeyPhaseBit = false
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+
+ // We ack the reordered packet immediately, still in the new key phase.
+ tc.wantFrame("conn ACKs reordered packet",
+ packetType1RTT, debugFrameAck{
+ ranges: []i64range[packetNumber]{
+ {0, pnum0 + 2},
+ },
+ })
+ tc.wantIdle("packet is not ack-eliciting")
+ if got, want := tc.lastPacket.keyNumber, 1; got != want {
+ t.Errorf("after key rotation, conn sent packet with key %v, want %v", got, want)
+ }
+ if !tc.lastPacket.keyPhaseBit {
+ t.Errorf("after key rotation, conn failed to change Key Phase bit")
+ }
+}
+
+func TestKeyUpdateRejectPacketFromPriorPhase(t *testing.T) {
+ // "Packets with higher packet numbers MUST be protected with either
+ // the same or newer packet protection keys than packets with lower packet numbers."
+ // https://www.rfc-editor.org/rfc/rfc9001#section-6.4-2
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+ tc.ignoreFrames = nil // ignore nothing
+
+ // Peer initiates a key update.
+ tc.sendKeyNumber = 1
+ tc.sendKeyPhaseBit = true
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+
+ // We update to the new key.
+ tc.advanceToTimer()
+ tc.wantFrameType("conn ACKs last packet",
+ packetType1RTT, debugFrameAck{})
+ tc.wantFrame("first packet after a key update is always ack-eliciting",
+ packetType1RTT, debugFramePing{})
+ if got, want := tc.lastPacket.keyNumber, 1; got != want {
+ t.Errorf("after key rotation, conn sent packet with key %v, want %v", got, want)
+ }
+ if !tc.lastPacket.keyPhaseBit {
+ t.Errorf("after key rotation, conn failed to change Key Phase bit")
+ }
+ tc.wantIdle("conn has nothing to send")
+
+ // Peer sends an ack-eliciting packet using the prior phase keys.
+ // We fail to unprotect the packet and ignore it.
+ skipped := tc.peerNextPacketNum[appDataSpace]
+ tc.sendKeyNumber = 0
+ tc.sendKeyPhaseBit = false
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+
+ // Peer sends an ack-eliciting packet using the current phase keys.
+ tc.sendKeyNumber = 1
+ tc.sendKeyPhaseBit = true
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+
+ // We ack the peer's packets, not including the one sent with the wrong keys.
+ tc.wantFrame("conn ACKs packets, not including packet sent with wrong keys",
+ packetType1RTT, debugFrameAck{
+ ranges: []i64range[packetNumber]{
+ {0, skipped},
+ {skipped + 1, skipped + 2},
+ },
+ })
+}
+
+func TestKeyUpdateLocallyInitiated(t *testing.T) {
+ const updateAfter = 4 // initiate key update after 1-RTT packet 4
+ tc := newTestConn(t, serverSide)
+ tc.conn.keysAppData.updateAfter = updateAfter
+ tc.handshake()
+
+ for {
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+ tc.advanceToTimer()
+ tc.wantFrameType("conn ACKs last packet",
+ packetType1RTT, debugFrameAck{})
+ if tc.lastPacket.num > updateAfter {
+ break
+ }
+ if got, want := tc.lastPacket.keyNumber, 0; got != want {
+ t.Errorf("before key update, conn sent packet with key %v, want %v", got, want)
+ }
+ if tc.lastPacket.keyPhaseBit {
+ t.Errorf("before key update, keyPhaseBit is set, want unset")
+ }
+ }
+ if got, want := tc.lastPacket.keyNumber, 1; got != want {
+ t.Errorf("after key update, conn sent packet with key %v, want %v", got, want)
+ }
+ if !tc.lastPacket.keyPhaseBit {
+ t.Errorf("after key update, keyPhaseBit is unset, want set")
+ }
+ tc.wantFrame("first packet after a key update is always ack-eliciting",
+ packetType1RTT, debugFramePing{})
+ tc.wantIdle("no more frames")
+
+ // Peer sends another packet using the prior phase keys.
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+ tc.advanceToTimer()
+ tc.wantFrameType("conn ACKs packet in prior phase",
+ packetType1RTT, debugFrameAck{})
+ tc.wantIdle("packet is not ack-eliciting")
+ if got, want := tc.lastPacket.keyNumber, 1; got != want {
+ t.Errorf("after key update, conn sent packet with key %v, want %v", got, want)
+ }
+
+ // Peer updates to the next phase.
+ tc.sendKeyNumber = 1
+ tc.sendKeyPhaseBit = true
+ tc.writeAckForAll()
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+ tc.advanceToTimer()
+ tc.wantFrameType("conn ACKs packet in current phase",
+ packetType1RTT, debugFrameAck{})
+ tc.wantIdle("packet is not ack-eliciting")
+ if got, want := tc.lastPacket.keyNumber, 1; got != want {
+ t.Errorf("after key update, conn sent packet with key %v, want %v", got, want)
+ }
+
+ // Peer initiates its own update.
+ tc.sendKeyNumber = 2
+ tc.sendKeyPhaseBit = false
+ tc.writeFrames(packetType1RTT, debugFramePing{})
+ tc.advanceToTimer()
+ tc.wantFrameType("conn ACKs packet in current phase",
+ packetType1RTT, debugFrameAck{})
+ tc.wantFrame("first packet after a key update is always ack-eliciting",
+ packetType1RTT, debugFramePing{})
+ if got, want := tc.lastPacket.keyNumber, 2; got != want {
+ t.Errorf("after peer key update, conn sent packet with key %v, want %v", got, want)
+ }
+ if tc.lastPacket.keyPhaseBit {
+ t.Errorf("after peer key update, keyPhaseBit is unset, want set")
+ }
+}
diff --git a/quic/log.go b/quic/log.go
new file mode 100644
index 000000000..d7248343b
--- /dev/null
+++ b/quic/log.go
@@ -0,0 +1,69 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+var logPackets bool
+
+// Parse GODEBUG settings.
+//
+// GODEBUG=quiclogpackets=1 -- log every packet sent and received.
+func init() {
+ s := os.Getenv("GODEBUG")
+ for len(s) > 0 {
+ var opt string
+ opt, s, _ = strings.Cut(s, ",")
+ switch opt {
+ case "quiclogpackets=1":
+ logPackets = true
+ }
+ }
+}
+
+func logInboundLongPacket(c *Conn, p longPacket) {
+ if !logPackets {
+ return
+ }
+ prefix := c.String()
+ fmt.Printf("%v recv %v %v\n", prefix, p.ptype, p.num)
+ logFrames(prefix+" <- ", p.payload)
+}
+
+func logInboundShortPacket(c *Conn, p shortPacket) {
+ if !logPackets {
+ return
+ }
+ prefix := c.String()
+ fmt.Printf("%v recv 1-RTT %v\n", prefix, p.num)
+ logFrames(prefix+" <- ", p.payload)
+}
+
+func logSentPacket(c *Conn, ptype packetType, pnum packetNumber, src, dst, payload []byte) {
+ if !logPackets || len(payload) == 0 {
+ return
+ }
+ prefix := c.String()
+ fmt.Printf("%v send %v %v\n", prefix, ptype, pnum)
+ logFrames(prefix+" -> ", payload)
+}
+
+func logFrames(prefix string, payload []byte) {
+ for len(payload) > 0 {
+ f, n := parseDebugFrame(payload)
+ if n < 0 {
+ fmt.Printf("%vBAD DATA\n", prefix)
+ break
+ }
+ payload = payload[n:]
+ fmt.Printf("%v%v\n", prefix, f)
+ }
+}
diff --git a/internal/quic/loss.go b/quic/loss.go
similarity index 85%
rename from internal/quic/loss.go
rename to quic/loss.go
index 152815a29..796b5f7a3 100644
--- a/internal/quic/loss.go
+++ b/quic/loss.go
@@ -7,6 +7,8 @@
package quic
import (
+ "context"
+ "log/slog"
"math"
"time"
)
@@ -50,6 +52,9 @@ type lossState struct {
// https://www.rfc-editor.org/rfc/rfc9000#section-8-2
antiAmplificationLimit int
+ // Count of non-ack-eliciting packets (ACKs) sent since the last ack-eliciting one.
+ consecutiveNonAckElicitingPackets int
+
rtt rttState
pacer pacerState
cc *ccReno
@@ -176,7 +181,7 @@ func (c *lossState) nextNumber(space numberSpace) packetNumber {
}
// packetSent records a sent packet.
-func (c *lossState) packetSent(now time.Time, space numberSpace, sent *sentPacket) {
+func (c *lossState) packetSent(now time.Time, log *slog.Logger, space numberSpace, sent *sentPacket) {
sent.time = now
c.spaces[space].add(sent)
size := sent.size
@@ -184,13 +189,21 @@ func (c *lossState) packetSent(now time.Time, space numberSpace, sent *sentPacke
c.antiAmplificationLimit = max(0, c.antiAmplificationLimit-size)
}
if sent.inFlight {
- c.cc.packetSent(now, space, sent)
+ c.cc.packetSent(now, log, space, sent)
c.pacer.packetSent(now, size, c.cc.congestionWindow, c.rtt.smoothedRTT)
if sent.ackEliciting {
c.spaces[space].lastAckEliciting = sent.num
c.ptoExpired = false // reset expired PTO timer after sending probe
}
c.scheduleTimer(now)
+ if logEnabled(log, QLogLevelPacket) {
+ logBytesInFlight(log, c.cc.bytesInFlight)
+ }
+ }
+ if sent.ackEliciting {
+ c.consecutiveNonAckElicitingPackets = 0
+ } else {
+ c.consecutiveNonAckElicitingPackets++
}
}
@@ -259,7 +272,7 @@ func (c *lossState) receiveAckRange(now time.Time, space numberSpace, rangeIndex
// receiveAckEnd finishes processing an ack frame.
// The lossf function is called for each packet newly detected as lost.
-func (c *lossState) receiveAckEnd(now time.Time, space numberSpace, ackDelay time.Duration, lossf func(numberSpace, *sentPacket, packetFate)) {
+func (c *lossState) receiveAckEnd(now time.Time, log *slog.Logger, space numberSpace, ackDelay time.Duration, lossf func(numberSpace, *sentPacket, packetFate)) {
c.spaces[space].sentPacketList.clean()
// Update the RTT sample when the largest acknowledged packet in the ACK frame
// is newly acknowledged, and at least one newly acknowledged packet is ack-eliciting.
@@ -278,11 +291,44 @@ func (c *lossState) receiveAckEnd(now time.Time, space numberSpace, ackDelay tim
// https://www.rfc-editor.org/rfc/rfc9002.html#section-6.2.2.1-3
c.timer = time.Time{}
c.detectLoss(now, lossf)
- c.cc.packetBatchEnd(now, space, &c.rtt, c.maxAckDelay)
+ c.cc.packetBatchEnd(now, log, space, &c.rtt, c.maxAckDelay)
+
+ if logEnabled(log, QLogLevelPacket) {
+ var ssthresh slog.Attr
+ if c.cc.slowStartThreshold != math.MaxInt {
+ ssthresh = slog.Int("ssthresh", c.cc.slowStartThreshold)
+ }
+ log.LogAttrs(context.Background(), QLogLevelPacket,
+ "recovery:metrics_updated",
+ slog.Duration("min_rtt", c.rtt.minRTT),
+ slog.Duration("smoothed_rtt", c.rtt.smoothedRTT),
+ slog.Duration("latest_rtt", c.rtt.latestRTT),
+ slog.Duration("rtt_variance", c.rtt.rttvar),
+ slog.Int("congestion_window", c.cc.congestionWindow),
+ slog.Int("bytes_in_flight", c.cc.bytesInFlight),
+ ssthresh,
+ )
+ }
+}
+
+// discardPackets declares that packets within a number space will not be delivered
+// and that data contained in them should be resent.
+// For example, after receiving a Retry packet we discard already-sent Initial packets.
+func (c *lossState) discardPackets(space numberSpace, log *slog.Logger, lossf func(numberSpace, *sentPacket, packetFate)) {
+ for i := 0; i < c.spaces[space].size; i++ {
+ sent := c.spaces[space].nth(i)
+ sent.lost = true
+ c.cc.packetDiscarded(sent)
+ lossf(numberSpace(space), sent, packetLost)
+ }
+ c.spaces[space].clean()
+ if logEnabled(log, QLogLevelPacket) {
+ logBytesInFlight(log, c.cc.bytesInFlight)
+ }
}
// discardKeys is called when dropping packet protection keys for a number space.
-func (c *lossState) discardKeys(now time.Time, space numberSpace) {
+func (c *lossState) discardKeys(now time.Time, log *slog.Logger, space numberSpace) {
// https://www.rfc-editor.org/rfc/rfc9002.html#section-6.4
for i := 0; i < c.spaces[space].size; i++ {
sent := c.spaces[space].nth(i)
@@ -292,6 +338,9 @@ func (c *lossState) discardKeys(now time.Time, space numberSpace) {
c.spaces[space].maxAcked = -1
c.spaces[space].lastAckEliciting = -1
c.scheduleTimer(now)
+ if logEnabled(log, QLogLevelPacket) {
+ logBytesInFlight(log, c.cc.bytesInFlight)
+ }
}
func (c *lossState) lossDuration() time.Duration {
@@ -418,12 +467,15 @@ func (c *lossState) scheduleTimer(now time.Time) {
c.timer = time.Time{}
return
}
- // https://www.rfc-editor.org/rfc/rfc9002.html#section-6.2.1
- pto := c.ptoBasePeriod() << c.ptoBackoffCount
- c.timer = last.Add(pto)
+ c.timer = last.Add(c.ptoPeriod())
c.ptoTimerArmed = true
}
+func (c *lossState) ptoPeriod() time.Duration {
+ // https://www.rfc-editor.org/rfc/rfc9002.html#section-6.2.1
+ return c.ptoBasePeriod() << c.ptoBackoffCount
+}
+
func (c *lossState) ptoBasePeriod() time.Duration {
// https://www.rfc-editor.org/rfc/rfc9002.html#section-6.2.1
pto := c.rtt.smoothedRTT + max(4*c.rtt.rttvar, timerGranularity)
@@ -435,3 +487,10 @@ func (c *lossState) ptoBasePeriod() time.Duration {
}
return pto
}
+
+func logBytesInFlight(log *slog.Logger, bytesInFlight int) {
+ log.LogAttrs(context.Background(), QLogLevelPacket,
+ "recovery:metrics_updated",
+ slog.Int("bytes_in_flight", bytesInFlight),
+ )
+}
diff --git a/internal/quic/loss_test.go b/quic/loss_test.go
similarity index 99%
rename from internal/quic/loss_test.go
rename to quic/loss_test.go
index efbf1649e..1fb9662e4 100644
--- a/internal/quic/loss_test.go
+++ b/quic/loss_test.go
@@ -1060,7 +1060,7 @@ func TestLossPersistentCongestion(t *testing.T) {
maxDatagramSize: 1200,
})
test.send(initialSpace, 0, testSentPacketSize(1200))
- test.c.cc.setUnderutilized(true)
+ test.c.cc.setUnderutilized(nil, true)
test.advance(10 * time.Millisecond)
test.ack(initialSpace, 0*time.Millisecond, i64range[packetNumber]{0, 1})
@@ -1377,7 +1377,7 @@ func (c *lossTest) setRTTVar(d time.Duration) {
func (c *lossTest) setUnderutilized(v bool) {
c.t.Logf("set congestion window underutilized: %v", v)
- c.c.cc.setUnderutilized(v)
+ c.c.cc.setUnderutilized(nil, v)
}
func (c *lossTest) advance(d time.Duration) {
@@ -1438,7 +1438,7 @@ func (c *lossTest) send(spaceID numberSpace, opts ...any) {
sent := &sentPacket{}
*sent = prototype
sent.num = num
- c.c.packetSent(c.now, spaceID, sent)
+ c.c.packetSent(c.now, nil, spaceID, sent)
}
}
@@ -1462,7 +1462,7 @@ func (c *lossTest) ack(spaceID numberSpace, ackDelay time.Duration, rs ...i64ran
c.t.Logf("ack %v delay=%v [%v,%v)", spaceID, ackDelay, r.start, r.end)
c.c.receiveAckRange(c.now, spaceID, i, r.start, r.end, c.onAckOrLoss)
}
- c.c.receiveAckEnd(c.now, spaceID, ackDelay, c.onAckOrLoss)
+ c.c.receiveAckEnd(c.now, nil, spaceID, ackDelay, c.onAckOrLoss)
}
func (c *lossTest) onAckOrLoss(space numberSpace, sent *sentPacket, fate packetFate) {
@@ -1491,7 +1491,7 @@ func (c *lossTest) discardKeys(spaceID numberSpace) {
c.t.Helper()
c.checkUnexpectedEvents()
c.t.Logf("discard %s keys", spaceID)
- c.c.discardKeys(c.now, spaceID)
+ c.c.discardKeys(c.now, nil, spaceID)
}
func (c *lossTest) setMaxAckDelay(d time.Duration) {
diff --git a/quic/main_test.go b/quic/main_test.go
new file mode 100644
index 000000000..ecd0b1e9f
--- /dev/null
+++ b/quic/main_test.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+)
+
+func TestMain(m *testing.M) {
+ defer os.Exit(m.Run())
+
+ // Look for leaked goroutines.
+ //
+ // Checking after every test makes it easier to tell which test is the culprit,
+ // but checking once at the end is faster and less likely to miss something.
+ if runtime.GOOS == "js" {
+ // The js-wasm runtime creates an additional background goroutine.
+ // Just skip the leak check there.
+ return
+ }
+ start := time.Now()
+ warned := false
+ for {
+ buf := make([]byte, 2<<20)
+ buf = buf[:runtime.Stack(buf, true)]
+ leaked := false
+ for _, g := range bytes.Split(buf, []byte("\n\n")) {
+ if bytes.Contains(g, []byte("quic.TestMain")) ||
+ bytes.Contains(g, []byte("created by os/signal.Notify")) ||
+ bytes.Contains(g, []byte("gotraceback_test.go")) {
+ continue
+ }
+ leaked = true
+ }
+ if !leaked {
+ break
+ }
+ if !warned && time.Since(start) > 1*time.Second {
+ // Print a warning quickly, in case this is an interactive session.
+ // Keep waiting until the test times out, in case this is a slow trybot.
+ fmt.Printf("Tests seem to have leaked some goroutines, still waiting.\n\n")
+ fmt.Print(string(buf))
+ warned = true
+ }
+ // Goroutines might still be shutting down.
+ time.Sleep(1 * time.Millisecond)
+ }
+}
diff --git a/internal/quic/math.go b/quic/math.go
similarity index 100%
rename from internal/quic/math.go
rename to quic/math.go
diff --git a/internal/quic/pacer.go b/quic/pacer.go
similarity index 100%
rename from internal/quic/pacer.go
rename to quic/pacer.go
diff --git a/internal/quic/pacer_test.go b/quic/pacer_test.go
similarity index 100%
rename from internal/quic/pacer_test.go
rename to quic/pacer_test.go
diff --git a/internal/quic/packet.go b/quic/packet.go
similarity index 52%
rename from internal/quic/packet.go
rename to quic/packet.go
index 93a9102e8..7a874319d 100644
--- a/internal/quic/packet.go
+++ b/quic/packet.go
@@ -6,6 +6,11 @@
package quic
+import (
+ "encoding/binary"
+ "fmt"
+)
+
// packetType is a QUIC packet type.
// https://www.rfc-editor.org/rfc/rfc9000.html#section-17
type packetType byte
@@ -20,12 +25,46 @@ const (
packetTypeVersionNegotiation
)
+func (p packetType) String() string {
+ switch p {
+ case packetTypeInitial:
+ return "Initial"
+ case packetType0RTT:
+ return "0-RTT"
+ case packetTypeHandshake:
+ return "Handshake"
+ case packetTypeRetry:
+ return "Retry"
+ case packetType1RTT:
+ return "1-RTT"
+ }
+ return fmt.Sprintf("unknown packet type %v", byte(p))
+}
+
+func (p packetType) qlogString() string {
+ switch p {
+ case packetTypeInitial:
+ return "initial"
+ case packetType0RTT:
+ return "0RTT"
+ case packetTypeHandshake:
+ return "handshake"
+ case packetTypeRetry:
+ return "retry"
+ case packetType1RTT:
+ return "1RTT"
+ }
+ return "unknown"
+}
+
// Bits set in the first byte of a packet.
const (
- headerFormLong = 0x80 // https://www.rfc-editor.org/rfc/rfc9000.html#section-17.2-3.2.1
- headerFormShort = 0x00 // https://www.rfc-editor.org/rfc/rfc9000.html#section-17.3.1-4.2.1
- fixedBit = 0x40 // https://www.rfc-editor.org/rfc/rfc9000.html#section-17.2-3.4.1
- reservedBits = 0x0c // https://www.rfc-editor.org/rfc/rfc9000#section-17.2-8.2.1
+ headerFormLong = 0x80 // https://www.rfc-editor.org/rfc/rfc9000.html#section-17.2-3.2.1
+ headerFormShort = 0x00 // https://www.rfc-editor.org/rfc/rfc9000.html#section-17.3.1-4.2.1
+ fixedBit = 0x40 // https://www.rfc-editor.org/rfc/rfc9000.html#section-17.2-3.4.1
+ reservedLongBits = 0x0c // https://www.rfc-editor.org/rfc/rfc9000#section-17.2-8.2.1
+ reserved1RTTBits = 0x18 // https://www.rfc-editor.org/rfc/rfc9000#section-17.3.1-4.8.1
+ keyPhaseBit = 0x04 // https://www.rfc-editor.org/rfc/rfc9000#section-17.3.1-4.10.1
)
// Long Packet Type bits.
@@ -74,6 +113,9 @@ const (
streamFinBit = 0x01
)
+// Maximum length of a connection ID.
+const maxConnIDLen = 20
+
// isLongHeader returns true if b is the first byte of a long header.
func isLongHeader(b byte) bool {
return b&headerFormLong == headerFormLong
@@ -137,15 +179,41 @@ func dstConnIDForDatagram(pkt []byte) (id []byte, ok bool) {
return b[:n], true
}
+// parseVersionNegotiation parses a Version Negotiation packet.
+// The returned versions is a slice of big-endian uint32s.
+// It returns (nil, nil, nil) for an invalid packet.
+func parseVersionNegotiation(pkt []byte) (dstConnID, srcConnID, versions []byte) {
+ p, ok := parseGenericLongHeaderPacket(pkt)
+ if !ok {
+ return nil, nil, nil
+ }
+ if len(p.data)%4 != 0 {
+ return nil, nil, nil
+ }
+ return p.dstConnID, p.srcConnID, p.data
+}
+
+// appendVersionNegotiation appends a Version Negotiation packet to pkt,
+// returning the result.
+func appendVersionNegotiation(pkt, dstConnID, srcConnID []byte, versions ...uint32) []byte {
+ pkt = append(pkt, headerFormLong|fixedBit) // header byte
+ pkt = append(pkt, 0, 0, 0, 0) // Version (0 for Version Negotiation)
+ pkt = appendUint8Bytes(pkt, dstConnID) // Destination Connection ID
+ pkt = appendUint8Bytes(pkt, srcConnID) // Source Connection ID
+ for _, v := range versions {
+ pkt = binary.BigEndian.AppendUint32(pkt, v) // Supported Version
+ }
+ return pkt
+}
+
// A longPacket is a long header packet.
type longPacket struct {
- ptype packetType
- reservedBits uint8
- version uint32
- num packetNumber
- dstConnID []byte
- srcConnID []byte
- payload []byte
+ ptype packetType
+ version uint32
+ num packetNumber
+ dstConnID []byte
+ srcConnID []byte
+ payload []byte
// The extra data depends on the packet type:
// Initial: Token.
@@ -155,7 +223,45 @@ type longPacket struct {
// A shortPacket is a short header (1-RTT) packet.
type shortPacket struct {
- reservedBits uint8
- num packetNumber
- payload []byte
+ num packetNumber
+ payload []byte
+}
+
+// A genericLongPacket is a long header packet of an arbitrary QUIC version.
+// https://www.rfc-editor.org/rfc/rfc8999#section-5.1
+type genericLongPacket struct {
+ version uint32
+ dstConnID []byte
+ srcConnID []byte
+ data []byte
+}
+
+func parseGenericLongHeaderPacket(b []byte) (p genericLongPacket, ok bool) {
+ if len(b) < 5 || !isLongHeader(b[0]) {
+ return genericLongPacket{}, false
+ }
+ b = b[1:]
+ // Version (32),
+ var n int
+ p.version, n = consumeUint32(b)
+ if n < 0 {
+ return genericLongPacket{}, false
+ }
+ b = b[n:]
+ // Destination Connection ID Length (8),
+ // Destination Connection ID (0..2048),
+ p.dstConnID, n = consumeUint8Bytes(b)
+ if n < 0 || len(p.dstConnID) > 2048/8 {
+ return genericLongPacket{}, false
+ }
+ b = b[n:]
+ // Source Connection ID Length (8),
+ // Source Connection ID (0..2048),
+ p.srcConnID, n = consumeUint8Bytes(b)
+ if n < 0 || len(p.dstConnID) > 2048/8 {
+ return genericLongPacket{}, false
+ }
+ b = b[n:]
+ p.data = b
+ return p, true
}
diff --git a/internal/quic/packet_codec_test.go b/quic/packet_codec_test.go
similarity index 80%
rename from internal/quic/packet_codec_test.go
rename to quic/packet_codec_test.go
index 3503d2431..3b39795ef 100644
--- a/internal/quic/packet_codec_test.go
+++ b/quic/packet_codec_test.go
@@ -9,15 +9,20 @@ package quic
import (
"bytes"
"crypto/tls"
+ "io"
+ "log/slog"
"reflect"
"testing"
+ "time"
+
+ "golang.org/x/net/quic/qlog"
)
func TestParseLongHeaderPacket(t *testing.T) {
// Example Initial packet from:
// https://www.rfc-editor.org/rfc/rfc9001.html#section-a.3
cid := unhex(`8394c8f03e515708`)
- _, initialServerKeys := initialKeys(cid)
+ initialServerKeys := initialKeys(cid, clientSide).r
pkt := unhex(`
cf000000010008f067a5502a4262b500 4075c0d95a482cd0991cd25b0aac406a
5816b6394100f37a1c69797554780bb3 8cc5a99f5ede4cf73c3ec2493a1839b3
@@ -65,20 +70,21 @@ func TestParseLongHeaderPacket(t *testing.T) {
}
// Parse with the wrong keys.
- _, invalidKeys := initialKeys([]byte{})
+ invalidKeys := initialKeys([]byte{}, clientSide).w
if _, n := parseLongHeaderPacket(pkt, invalidKeys, 0); n != -1 {
t.Fatalf("parse long header packet with wrong keys: n=%v, want -1", n)
}
}
func TestRoundtripEncodeLongPacket(t *testing.T) {
- aes128Keys, _ := newKeys(tls.TLS_AES_128_GCM_SHA256, []byte("secret"))
- aes256Keys, _ := newKeys(tls.TLS_AES_256_GCM_SHA384, []byte("secret"))
- chachaKeys, _ := newKeys(tls.TLS_CHACHA20_POLY1305_SHA256, []byte("secret"))
+ var aes128Keys, aes256Keys, chachaKeys fixedKeys
+ aes128Keys.init(tls.TLS_AES_128_GCM_SHA256, []byte("secret"))
+ aes256Keys.init(tls.TLS_AES_256_GCM_SHA384, []byte("secret"))
+ chachaKeys.init(tls.TLS_CHACHA20_POLY1305_SHA256, []byte("secret"))
for _, test := range []struct {
desc string
p longPacket
- k keys
+ k fixedKeys
}{{
desc: "Initial, 1-byte number, AES128",
p: longPacket{
@@ -145,9 +151,16 @@ func TestRoundtripEncodeLongPacket(t *testing.T) {
}
func TestRoundtripEncodeShortPacket(t *testing.T) {
- aes128Keys, _ := newKeys(tls.TLS_AES_128_GCM_SHA256, []byte("secret"))
- aes256Keys, _ := newKeys(tls.TLS_AES_256_GCM_SHA384, []byte("secret"))
- chachaKeys, _ := newKeys(tls.TLS_CHACHA20_POLY1305_SHA256, []byte("secret"))
+ var aes128Keys, aes256Keys, chachaKeys updatingKeyPair
+ aes128Keys.r.init(tls.TLS_AES_128_GCM_SHA256, []byte("secret"))
+ aes256Keys.r.init(tls.TLS_AES_256_GCM_SHA384, []byte("secret"))
+ chachaKeys.r.init(tls.TLS_CHACHA20_POLY1305_SHA256, []byte("secret"))
+ aes128Keys.w = aes128Keys.r
+ aes256Keys.w = aes256Keys.r
+ chachaKeys.w = chachaKeys.r
+ aes128Keys.updateAfter = maxPacketNumber
+ aes256Keys.updateAfter = maxPacketNumber
+ chachaKeys.updateAfter = maxPacketNumber
connID := make([]byte, connIDLen)
for i := range connID {
connID[i] = byte(i)
@@ -156,7 +169,7 @@ func TestRoundtripEncodeShortPacket(t *testing.T) {
desc string
num packetNumber
payload []byte
- k keys
+ k updatingKeyPair
}{{
desc: "1-byte number, AES128",
num: 0, // 1-byte encoding,
@@ -183,11 +196,11 @@ func TestRoundtripEncodeShortPacket(t *testing.T) {
w.reset(1200)
w.start1RTTPacket(test.num, 0, connID)
w.b = append(w.b, test.payload...)
- w.finish1RTTPacket(test.num, 0, connID, test.k)
+ w.finish1RTTPacket(test.num, 0, connID, &test.k)
pkt := w.datagram()
- p, n := parse1RTTPacket(pkt, test.k, connIDLen, 0)
- if n != len(pkt) {
- t.Errorf("parse1RTTPacket: n=%v, want %v", n, len(pkt))
+ p, err := parse1RTTPacket(pkt, &test.k, connIDLen, 0)
+ if err != nil {
+ t.Errorf("parse1RTTPacket: err=%v, want nil", err)
}
if p.num != test.num || !bytes.Equal(p.payload, test.payload) {
t.Errorf("Round-trip encode/decode did not preserve packet.\nsent: num=%v, payload={%x}\ngot: num=%v, payload={%x}", test.num, test.payload, p.num, p.payload)
@@ -199,11 +212,13 @@ func TestRoundtripEncodeShortPacket(t *testing.T) {
func TestFrameEncodeDecode(t *testing.T) {
for _, test := range []struct {
s string
+ j string
f debugFrame
b []byte
truncated []byte
}{{
s: "PADDING*1",
+ j: `{"frame_type":"padding","length":1}`,
f: debugFramePadding{
size: 1,
},
@@ -213,12 +228,14 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "PING",
+ j: `{"frame_type":"ping"}`,
f: debugFramePing{},
b: []byte{
0x01, // TYPE(i) = 0x01
},
}, {
s: "ACK Delay=10 [0,16) [17,32) [48,64)",
+ j: `"error: debugFrameAck should not appear as a slog Value"`,
f: debugFrameAck{
ackDelay: 10,
ranges: []i64range[packetNumber]{
@@ -249,6 +266,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "RESET_STREAM ID=1 Code=2 FinalSize=3",
+ j: `{"frame_type":"reset_stream","stream_id":1,"final_size":3}`,
f: debugFrameResetStream{
id: 1,
code: 2,
@@ -262,6 +280,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STOP_SENDING ID=1 Code=2",
+ j: `{"frame_type":"stop_sending","stream_id":1,"error_code":2}`,
f: debugFrameStopSending{
id: 1,
code: 2,
@@ -273,6 +292,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "CRYPTO Offset=1 Length=2",
+ j: `{"frame_type":"crypto","offset":1,"length":2}`,
f: debugFrameCrypto{
off: 1,
data: []byte{3, 4},
@@ -291,6 +311,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "NEW_TOKEN Token=0304",
+ j: `{"frame_type":"new_token","token":"0304"}`,
f: debugFrameNewToken{
token: []byte{3, 4},
},
@@ -301,6 +322,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STREAM ID=1 Offset=0 Length=0",
+ j: `{"frame_type":"stream","stream_id":1,"offset":0,"length":0}`,
f: debugFrameStream{
id: 1,
fin: false,
@@ -316,6 +338,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STREAM ID=100 Offset=4 Length=3",
+ j: `{"frame_type":"stream","stream_id":100,"offset":4,"length":3}`,
f: debugFrameStream{
id: 100,
fin: false,
@@ -338,6 +361,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STREAM ID=100 FIN Offset=4 Length=3",
+ j: `{"frame_type":"stream","stream_id":100,"offset":4,"length":3,"fin":true}`,
f: debugFrameStream{
id: 100,
fin: true,
@@ -360,6 +384,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STREAM ID=1 FIN Offset=100 Length=0",
+ j: `{"frame_type":"stream","stream_id":1,"offset":100,"length":0,"fin":true}`,
f: debugFrameStream{
id: 1,
fin: true,
@@ -375,6 +400,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "MAX_DATA Max=10",
+ j: `{"frame_type":"max_data","maximum":10}`,
f: debugFrameMaxData{
max: 10,
},
@@ -384,6 +410,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "MAX_STREAM_DATA ID=1 Max=10",
+ j: `{"frame_type":"max_stream_data","stream_id":1,"maximum":10}`,
f: debugFrameMaxStreamData{
id: 1,
max: 10,
@@ -395,6 +422,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "MAX_STREAMS Type=bidi Max=1",
+ j: `{"frame_type":"max_streams","stream_type":"bidirectional","maximum":1}`,
f: debugFrameMaxStreams{
streamType: bidiStream,
max: 1,
@@ -405,6 +433,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "MAX_STREAMS Type=uni Max=1",
+ j: `{"frame_type":"max_streams","stream_type":"unidirectional","maximum":1}`,
f: debugFrameMaxStreams{
streamType: uniStream,
max: 1,
@@ -415,6 +444,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "DATA_BLOCKED Max=1",
+ j: `{"frame_type":"data_blocked","limit":1}`,
f: debugFrameDataBlocked{
max: 1,
},
@@ -424,6 +454,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STREAM_DATA_BLOCKED ID=1 Max=2",
+ j: `{"frame_type":"stream_data_blocked","stream_id":1,"limit":2}`,
f: debugFrameStreamDataBlocked{
id: 1,
max: 2,
@@ -435,6 +466,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STREAMS_BLOCKED Type=bidi Max=1",
+ j: `{"frame_type":"streams_blocked","stream_type":"bidirectional","limit":1}`,
f: debugFrameStreamsBlocked{
streamType: bidiStream,
max: 1,
@@ -445,6 +477,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "STREAMS_BLOCKED Type=uni Max=1",
+ j: `{"frame_type":"streams_blocked","stream_type":"unidirectional","limit":1}`,
f: debugFrameStreamsBlocked{
streamType: uniStream,
max: 1,
@@ -455,6 +488,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "NEW_CONNECTION_ID Seq=3 Retire=2 ID=a0a1a2a3 Token=0102030405060708090a0b0c0d0e0f10",
+ j: `{"frame_type":"new_connection_id","sequence_number":3,"retire_prior_to":2,"connection_id":"a0a1a2a3","stateless_reset_token":"0102030405060708090a0b0c0d0e0f10"}`,
f: debugFrameNewConnectionID{
seq: 3,
retirePriorTo: 2,
@@ -471,6 +505,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "RETIRE_CONNECTION_ID Seq=1",
+ j: `{"frame_type":"retire_connection_id","sequence_number":1}`,
f: debugFrameRetireConnectionID{
seq: 1,
},
@@ -480,8 +515,9 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "PATH_CHALLENGE Data=0123456789abcdef",
+ j: `{"frame_type":"path_challenge","data":"0123456789abcdef"}`,
f: debugFramePathChallenge{
- data: 0x0123456789abcdef,
+ data: pathChallengeData{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
},
b: []byte{
0x1a, // Type (i) = 0x1a,
@@ -489,8 +525,9 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "PATH_RESPONSE Data=0123456789abcdef",
+ j: `{"frame_type":"path_response","data":"0123456789abcdef"}`,
f: debugFramePathResponse{
- data: 0x0123456789abcdef,
+ data: pathChallengeData{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
},
b: []byte{
0x1b, // Type (i) = 0x1b,
@@ -498,6 +535,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: `CONNECTION_CLOSE Code=INTERNAL_ERROR FrameType=2 Reason="oops"`,
+ j: `{"frame_type":"connection_close","error_space":"transport","error_code_value":1,"reason":"oops"}`,
f: debugFrameConnectionCloseTransport{
code: 1,
frameType: 2,
@@ -512,6 +550,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: `CONNECTION_CLOSE AppCode=1 Reason="oops"`,
+ j: `{"frame_type":"connection_close","error_space":"application","error_code_value":1,"reason":"oops"}`,
f: debugFrameConnectionCloseApplication{
code: 1,
reason: "oops",
@@ -524,6 +563,7 @@ func TestFrameEncodeDecode(t *testing.T) {
},
}, {
s: "HANDSHAKE_DONE",
+ j: `{"frame_type":"handshake_done"}`,
f: debugFrameHandshakeDone{},
b: []byte{
0x1e, // Type (i) = 0x1e,
@@ -546,6 +586,9 @@ func TestFrameEncodeDecode(t *testing.T) {
if got, want := test.f.String(), test.s; got != want {
t.Errorf("frame.String():\ngot %q\nwant %q", got, want)
}
+ if got, want := frameJSON(test.f), test.j; got != want {
+ t.Errorf("frame.LogValue():\ngot %q\nwant %q", got, want)
+ }
// Try encoding the frame into too little space.
// Most frames will result in an error; some (like STREAM frames) will truncate
@@ -571,6 +614,42 @@ func TestFrameEncodeDecode(t *testing.T) {
}
}
+func TestFrameScaledAck(t *testing.T) {
+ for _, test := range []struct {
+ j string
+ f debugFrameScaledAck
+ }{{
+ j: `{"frame_type":"ack","acked_ranges":[[0,15],[17],[48,63]],"ack_delay":10.000000}`,
+ f: debugFrameScaledAck{
+ ackDelay: 10 * time.Millisecond,
+ ranges: []i64range[packetNumber]{
+ {0x00, 0x10},
+ {0x11, 0x12},
+ {0x30, 0x40},
+ },
+ },
+ }} {
+ if got, want := frameJSON(test.f), test.j; got != want {
+ t.Errorf("frame.LogValue():\ngot %q\nwant %q", got, want)
+ }
+ }
+}
+
+func frameJSON(f slog.LogValuer) string {
+ var buf bytes.Buffer
+ h := qlog.NewJSONHandler(qlog.HandlerOptions{
+ Level: QLogLevelFrame,
+ NewTrace: func(info qlog.TraceInfo) (io.WriteCloser, error) {
+ return nopCloseWriter{&buf}, nil
+ },
+ })
+ // Log the frame, and then trim out everything but the frame from the log.
+ slog.New(h).Info("message", slog.Any("frame", f))
+ _, b, _ := bytes.Cut(buf.Bytes(), []byte(`"frame":`))
+ b = bytes.TrimSuffix(b, []byte("}}\n"))
+ return string(b)
+}
+
func TestFrameDecode(t *testing.T) {
for _, test := range []struct {
desc string
@@ -700,7 +779,7 @@ func TestFrameDecodeErrors(t *testing.T) {
func FuzzParseLongHeaderPacket(f *testing.F) {
cid := unhex(`0000000000000000`)
- _, initialServerKeys := initialKeys(cid)
+ initialServerKeys := initialKeys(cid, clientSide).r
f.Fuzz(func(t *testing.T, in []byte) {
parseLongHeaderPacket(in, initialServerKeys, 0)
})
diff --git a/internal/quic/packet_number.go b/quic/packet_number.go
similarity index 100%
rename from internal/quic/packet_number.go
rename to quic/packet_number.go
diff --git a/internal/quic/packet_number_test.go b/quic/packet_number_test.go
similarity index 100%
rename from internal/quic/packet_number_test.go
rename to quic/packet_number_test.go
diff --git a/internal/quic/packet_parser.go b/quic/packet_parser.go
similarity index 86%
rename from internal/quic/packet_parser.go
rename to quic/packet_parser.go
index 908a82ed9..feef9eac7 100644
--- a/internal/quic/packet_parser.go
+++ b/quic/packet_parser.go
@@ -18,7 +18,7 @@ package quic
// and its length in bytes.
//
// It returns an empty packet and -1 if the packet could not be parsed.
-func parseLongHeaderPacket(pkt []byte, k keys, pnumMax packetNumber) (p longPacket, n int) {
+func parseLongHeaderPacket(pkt []byte, k fixedKeys, pnumMax packetNumber) (p longPacket, n int) {
if len(pkt) < 5 || !isLongHeader(pkt[0]) {
return longPacket{}, -1
}
@@ -47,7 +47,7 @@ func parseLongHeaderPacket(pkt []byte, k keys, pnumMax packetNumber) (p longPack
// Destination Connection ID Length (8),
// Destination Connection ID (0..160),
p.dstConnID, n = consumeUint8Bytes(b)
- if n < 0 || len(p.dstConnID) > 20 {
+ if n < 0 || len(p.dstConnID) > maxConnIDLen {
return longPacket{}, -1
}
b = b[n:]
@@ -55,7 +55,7 @@ func parseLongHeaderPacket(pkt []byte, k keys, pnumMax packetNumber) (p longPack
// Source Connection ID Length (8),
// Source Connection ID (0..160),
p.srcConnID, n = consumeUint8Bytes(b)
- if n < 0 || len(p.dstConnID) > 20 {
+ if n < 0 || len(p.dstConnID) > maxConnIDLen {
return longPacket{}, -1
}
b = b[n:]
@@ -91,15 +91,12 @@ func parseLongHeaderPacket(pkt []byte, k keys, pnumMax packetNumber) (p longPack
pnumOff := len(pkt) - len(b)
pkt = pkt[:pnumOff+int(payLen)]
- if k.initialized() {
+ if k.isSet() {
var err error
p.payload, p.num, err = k.unprotect(pkt, pnumOff, pnumMax)
if err != nil {
return longPacket{}, -1
}
- // Reserved bits should always be zero, but this is handled
- // as a protocol-level violation by the caller rather than a parse error.
- p.reservedBits = pkt[0] & reservedBits
}
return p, len(pkt)
}
@@ -146,23 +143,21 @@ func skipLongHeaderPacket(pkt []byte) int {
//
// On input, pkt contains a short header packet, k the decryption keys for the packet,
// and pnumMax the largest packet number seen in the number space of this packet.
-func parse1RTTPacket(pkt []byte, k keys, dstConnIDLen int, pnumMax packetNumber) (p shortPacket, n int) {
- var err error
- p.payload, p.num, err = k.unprotect(pkt, 1+dstConnIDLen, pnumMax)
+func parse1RTTPacket(pkt []byte, k *updatingKeyPair, dstConnIDLen int, pnumMax packetNumber) (p shortPacket, err error) {
+ pay, pnum, err := k.unprotect(pkt, 1+dstConnIDLen, pnumMax)
if err != nil {
- return shortPacket{}, -1
+ return shortPacket{}, err
}
- // Reserved bits should always be zero, but this is handled
- // as a protocol-level violation by the caller rather than a parse error.
- p.reservedBits = pkt[0] & reservedBits
- return p, len(pkt)
+ p.num = pnum
+ p.payload = pay
+ return p, nil
}
// Consume functions return n=-1 on conditions which result in FRAME_ENCODING_ERROR,
// which includes both general parse failures and specific violations of frame
// constraints.
-func consumeAckFrame(frame []byte, f func(start, end packetNumber)) (largest packetNumber, ackDelay unscaledAckDelay, n int) {
+func consumeAckFrame(frame []byte, f func(rangeIndex int, start, end packetNumber)) (largest packetNumber, ackDelay unscaledAckDelay, n int) {
b := frame[1:] // type
largestAck, n := consumeVarint(b)
@@ -195,7 +190,7 @@ func consumeAckFrame(frame []byte, f func(start, end packetNumber)) (largest pac
if rangeMin < 0 || rangeMin > rangeMax {
return 0, 0, -1
}
- f(rangeMin, rangeMax+1)
+ f(int(i), rangeMin, rangeMax+1)
if i == ackRangeCount {
break
@@ -330,6 +325,9 @@ func consumeStreamFrame(b []byte) (id streamID, off int64, fin bool, data []byte
data = b[n:]
n += len(data)
}
+ if off+int64(len(data)) >= 1<<62 {
+ return 0, 0, false, nil, -1
+ }
return streamID(idInt), off, fin, data, n
}
@@ -375,7 +373,7 @@ func consumeMaxStreamsFrame(b []byte) (typ streamType, max int64, n int) {
return 0, 0, -1
}
n += nn
- if v > 1<<60 {
+ if v > maxStreamsLimit {
return 0, 0, -1
}
return typ, int64(v), n
@@ -422,42 +420,42 @@ func consumeStreamsBlockedFrame(b []byte) (typ streamType, max int64, n int) {
return typ, max, n
}
-func consumeNewConnectionIDFrame(b []byte) (seq, retire int64, connID []byte, resetToken [16]byte, n int) {
+func consumeNewConnectionIDFrame(b []byte) (seq, retire int64, connID []byte, resetToken statelessResetToken, n int) {
n = 1
var nn int
seq, nn = consumeVarintInt64(b[n:])
if nn < 0 {
- return 0, 0, nil, [16]byte{}, -1
+ return 0, 0, nil, statelessResetToken{}, -1
}
n += nn
retire, nn = consumeVarintInt64(b[n:])
if nn < 0 {
- return 0, 0, nil, [16]byte{}, -1
+ return 0, 0, nil, statelessResetToken{}, -1
}
n += nn
if seq < retire {
- return 0, 0, nil, [16]byte{}, -1
+ return 0, 0, nil, statelessResetToken{}, -1
}
connID, nn = consumeVarintBytes(b[n:])
if nn < 0 {
- return 0, 0, nil, [16]byte{}, -1
+ return 0, 0, nil, statelessResetToken{}, -1
}
if len(connID) < 1 || len(connID) > 20 {
- return 0, 0, nil, [16]byte{}, -1
+ return 0, 0, nil, statelessResetToken{}, -1
}
n += nn
if len(b[n:]) < len(resetToken) {
- return 0, 0, nil, [16]byte{}, -1
+ return 0, 0, nil, statelessResetToken{}, -1
}
copy(resetToken[:], b[n:])
n += len(resetToken)
return seq, retire, connID, resetToken, n
}
-func consumeRetireConnectionIDFrame(b []byte) (seq uint64, n int) {
+func consumeRetireConnectionIDFrame(b []byte) (seq int64, n int) {
n = 1
var nn int
- seq, nn = consumeVarint(b[n:])
+ seq, nn = consumeVarintInt64(b[n:])
if nn < 0 {
return 0, -1
}
@@ -465,18 +463,17 @@ func consumeRetireConnectionIDFrame(b []byte) (seq uint64, n int) {
return seq, n
}
-func consumePathChallengeFrame(b []byte) (data uint64, n int) {
+func consumePathChallengeFrame(b []byte) (data pathChallengeData, n int) {
n = 1
- var nn int
- data, nn = consumeUint64(b[n:])
- if nn < 0 {
- return 0, -1
+ nn := copy(data[:], b[n:])
+ if nn != len(data) {
+ return data, -1
}
n += nn
return data, n
}
-func consumePathResponseFrame(b []byte) (data uint64, n int) {
+func consumePathResponseFrame(b []byte) (data pathChallengeData, n int) {
return consumePathChallengeFrame(b) // identical frame format
}
diff --git a/quic/packet_protection.go b/quic/packet_protection.go
new file mode 100644
index 000000000..1f939f491
--- /dev/null
+++ b/quic/packet_protection.go
@@ -0,0 +1,535 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "crypto"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/sha256"
+ "crypto/tls"
+ "errors"
+ "hash"
+
+ "golang.org/x/crypto/chacha20"
+ "golang.org/x/crypto/chacha20poly1305"
+ "golang.org/x/crypto/cryptobyte"
+ "golang.org/x/crypto/hkdf"
+)
+
+var errInvalidPacket = errors.New("quic: invalid packet")
+
+// headerProtectionSampleSize is the size of the ciphertext sample used for header protection.
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.4.2
+const headerProtectionSampleSize = 16
+
+// aeadOverhead is the difference in size between the AEAD output and input.
+// All cipher suites defined for use with QUIC have 16 bytes of overhead.
+const aeadOverhead = 16
+
+// A headerKey applies or removes header protection.
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.4
+type headerKey struct {
+ hp headerProtection
+}
+
+func (k headerKey) isSet() bool {
+ return k.hp != nil
+}
+
+func (k *headerKey) init(suite uint16, secret []byte) {
+ h, keySize := hashForSuite(suite)
+ hpKey := hkdfExpandLabel(h.New, secret, "quic hp", nil, keySize)
+ switch suite {
+ case tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384:
+ c, err := aes.NewCipher(hpKey)
+ if err != nil {
+ panic(err)
+ }
+ k.hp = &aesHeaderProtection{cipher: c}
+ case tls.TLS_CHACHA20_POLY1305_SHA256:
+ k.hp = chaCha20HeaderProtection{hpKey}
+ default:
+ panic("BUG: unknown cipher suite")
+ }
+}
+
+// protect applies header protection.
+// pnumOff is the offset of the packet number in the packet.
+func (k headerKey) protect(hdr []byte, pnumOff int) {
+ // Apply header protection.
+ pnumSize := int(hdr[0]&0x03) + 1
+ sample := hdr[pnumOff+4:][:headerProtectionSampleSize]
+ mask := k.hp.headerProtection(sample)
+ if isLongHeader(hdr[0]) {
+ hdr[0] ^= mask[0] & 0x0f
+ } else {
+ hdr[0] ^= mask[0] & 0x1f
+ }
+ for i := 0; i < pnumSize; i++ {
+ hdr[pnumOff+i] ^= mask[1+i]
+ }
+}
+
+// unprotect removes header protection.
+// pnumOff is the offset of the packet number in the packet.
+// pnumMax is the largest packet number seen in the number space of this packet.
+func (k headerKey) unprotect(pkt []byte, pnumOff int, pnumMax packetNumber) (hdr, pay []byte, pnum packetNumber, _ error) {
+ if len(pkt) < pnumOff+4+headerProtectionSampleSize {
+ return nil, nil, 0, errInvalidPacket
+ }
+ numpay := pkt[pnumOff:]
+ sample := numpay[4:][:headerProtectionSampleSize]
+ mask := k.hp.headerProtection(sample)
+ if isLongHeader(pkt[0]) {
+ pkt[0] ^= mask[0] & 0x0f
+ } else {
+ pkt[0] ^= mask[0] & 0x1f
+ }
+ pnumLen := int(pkt[0]&0x03) + 1
+ pnum = packetNumber(0)
+ for i := 0; i < pnumLen; i++ {
+ numpay[i] ^= mask[1+i]
+ pnum = (pnum << 8) | packetNumber(numpay[i])
+ }
+ pnum = decodePacketNumber(pnumMax, pnum, pnumLen)
+ hdr = pkt[:pnumOff+pnumLen]
+ pay = numpay[pnumLen:]
+ return hdr, pay, pnum, nil
+}
+
+// headerProtection is the header_protection function as defined in:
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.4.1
+//
+// This function takes a sample of the packet ciphertext
+// and returns a 5-byte mask which will be applied to the
+// protected portions of the packet header.
+type headerProtection interface {
+ headerProtection(sample []byte) (mask [5]byte)
+}
+
+// AES-based header protection.
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.4.3
+type aesHeaderProtection struct {
+ cipher cipher.Block
+ scratch [aes.BlockSize]byte
+}
+
+func (hp *aesHeaderProtection) headerProtection(sample []byte) (mask [5]byte) {
+ hp.cipher.Encrypt(hp.scratch[:], sample)
+ copy(mask[:], hp.scratch[:])
+ return mask
+}
+
+// ChaCha20-based header protection.
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.4.4
+type chaCha20HeaderProtection struct {
+ key []byte
+}
+
+func (hp chaCha20HeaderProtection) headerProtection(sample []byte) (mask [5]byte) {
+ counter := uint32(sample[3])<<24 | uint32(sample[2])<<16 | uint32(sample[1])<<8 | uint32(sample[0])
+ nonce := sample[4:16]
+ c, err := chacha20.NewUnauthenticatedCipher(hp.key, nonce)
+ if err != nil {
+ panic(err)
+ }
+ c.SetCounter(counter)
+ c.XORKeyStream(mask[:], mask[:])
+ return mask
+}
+
+// A packetKey applies or removes packet protection.
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.1
+type packetKey struct {
+ aead cipher.AEAD // AEAD function used for packet protection.
+ iv []byte // IV used to construct the AEAD nonce.
+}
+
+func (k *packetKey) init(suite uint16, secret []byte) {
+ // https://www.rfc-editor.org/rfc/rfc9001#section-5.1
+ h, keySize := hashForSuite(suite)
+ key := hkdfExpandLabel(h.New, secret, "quic key", nil, keySize)
+ switch suite {
+ case tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384:
+ k.aead = newAESAEAD(key)
+ case tls.TLS_CHACHA20_POLY1305_SHA256:
+ k.aead = newChaCha20AEAD(key)
+ default:
+ panic("BUG: unknown cipher suite")
+ }
+ k.iv = hkdfExpandLabel(h.New, secret, "quic iv", nil, k.aead.NonceSize())
+}
+
+func newAESAEAD(key []byte) cipher.AEAD {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ panic(err)
+ }
+ aead, err := cipher.NewGCM(c)
+ if err != nil {
+ panic(err)
+ }
+ return aead
+}
+
+func newChaCha20AEAD(key []byte) cipher.AEAD {
+ var err error
+ aead, err := chacha20poly1305.New(key)
+ if err != nil {
+ panic(err)
+ }
+ return aead
+}
+
+func (k packetKey) protect(hdr, pay []byte, pnum packetNumber) []byte {
+ k.xorIV(pnum)
+ defer k.xorIV(pnum)
+ return k.aead.Seal(hdr, k.iv, pay, hdr)
+}
+
+func (k packetKey) unprotect(hdr, pay []byte, pnum packetNumber) (dec []byte, err error) {
+ k.xorIV(pnum)
+ defer k.xorIV(pnum)
+ return k.aead.Open(pay[:0], k.iv, pay, hdr)
+}
+
+// xorIV xors the packet protection IV with the packet number.
+func (k packetKey) xorIV(pnum packetNumber) {
+ k.iv[len(k.iv)-8] ^= uint8(pnum >> 56)
+ k.iv[len(k.iv)-7] ^= uint8(pnum >> 48)
+ k.iv[len(k.iv)-6] ^= uint8(pnum >> 40)
+ k.iv[len(k.iv)-5] ^= uint8(pnum >> 32)
+ k.iv[len(k.iv)-4] ^= uint8(pnum >> 24)
+ k.iv[len(k.iv)-3] ^= uint8(pnum >> 16)
+ k.iv[len(k.iv)-2] ^= uint8(pnum >> 8)
+ k.iv[len(k.iv)-1] ^= uint8(pnum)
+}
+
+// A fixedKeys is a header protection key and fixed packet protection key.
+// The packet protection key is fixed (it does not update).
+//
+// Fixed keys are used for Initial and Handshake keys, which do not update.
+type fixedKeys struct {
+ hdr headerKey
+ pkt packetKey
+}
+
+func (k *fixedKeys) init(suite uint16, secret []byte) {
+ k.hdr.init(suite, secret)
+ k.pkt.init(suite, secret)
+}
+
+func (k fixedKeys) isSet() bool {
+ return k.hdr.hp != nil
+}
+
+// protect applies packet protection to a packet.
+//
+// On input, hdr contains the packet header, pay the unencrypted payload,
+// pnumOff the offset of the packet number in the header, and pnum the untruncated
+// packet number.
+//
+// protect returns the result of appending the encrypted payload to hdr and
+// applying header protection.
+func (k fixedKeys) protect(hdr, pay []byte, pnumOff int, pnum packetNumber) []byte {
+ pkt := k.pkt.protect(hdr, pay, pnum)
+ k.hdr.protect(pkt, pnumOff)
+ return pkt
+}
+
+// unprotect removes packet protection from a packet.
+//
+// On input, pkt contains the full protected packet, pnumOff the offset of
+// the packet number in the header, and pnumMax the largest packet number
+// seen in the number space of this packet.
+//
+// unprotect removes header protection from the header in pkt, and returns
+// the unprotected payload and packet number.
+func (k fixedKeys) unprotect(pkt []byte, pnumOff int, pnumMax packetNumber) (pay []byte, num packetNumber, err error) {
+ hdr, pay, pnum, err := k.hdr.unprotect(pkt, pnumOff, pnumMax)
+ if err != nil {
+ return nil, 0, err
+ }
+ pay, err = k.pkt.unprotect(hdr, pay, pnum)
+ if err != nil {
+ return nil, 0, err
+ }
+ return pay, pnum, nil
+}
+
+// A fixedKeyPair is a read/write pair of fixed keys.
+type fixedKeyPair struct {
+ r, w fixedKeys
+}
+
+func (k *fixedKeyPair) discard() {
+ *k = fixedKeyPair{}
+}
+
+func (k *fixedKeyPair) canRead() bool {
+ return k.r.isSet()
+}
+
+func (k *fixedKeyPair) canWrite() bool {
+ return k.w.isSet()
+}
+
+// An updatingKeys is a header protection key and updatable packet protection key.
+// updatingKeys are used for 1-RTT keys, where the packet protection key changes
+// over the lifetime of a connection.
+// https://www.rfc-editor.org/rfc/rfc9001#section-6
+type updatingKeys struct {
+ suite uint16
+ hdr headerKey
+ pkt [2]packetKey // current, next
+ nextSecret []byte // secret used to generate pkt[1]
+}
+
+func (k *updatingKeys) init(suite uint16, secret []byte) {
+ k.suite = suite
+ k.hdr.init(suite, secret)
+ // Initialize pkt[1] with secret_0, and then call update to generate secret_1.
+ k.pkt[1].init(suite, secret)
+ k.nextSecret = secret
+ k.update()
+}
+
+// update performs a key update.
+// The current key in pkt[0] is discarded.
+// The next key in pkt[1] becomes the current key.
+// A new next key is generated in pkt[1].
+func (k *updatingKeys) update() {
+ k.nextSecret = updateSecret(k.suite, k.nextSecret)
+ k.pkt[0] = k.pkt[1]
+ k.pkt[1].init(k.suite, k.nextSecret)
+}
+
+func updateSecret(suite uint16, secret []byte) (nextSecret []byte) {
+ h, _ := hashForSuite(suite)
+ return hkdfExpandLabel(h.New, secret, "quic ku", nil, len(secret))
+}
+
+// An updatingKeyPair is a read/write pair of updating keys.
+//
+// We keep two keys (current and next) in both read and write directions.
+// When an incoming packet's phase matches the current phase bit,
+// we unprotect it using the current keys; otherwise we use the next keys.
+//
+// When updating=false, outgoing packets are protected using the current phase.
+//
+// An update is initiated and updating is set to true when:
+// - we decide to initiate a key update; or
+// - we successfully unprotect a packet using the next keys,
+// indicating the peer has initiated a key update.
+//
+// When updating=true, outgoing packets are protected using the next phase.
+// We do not change the current phase bit or generate new keys yet.
+//
+// The update concludes when we receive an ACK frame for a packet sent
+// with the next keys. At this time, we set updating to false, flip the
+// phase bit, and update the keys. This permits us to handle up to 1-RTT
+// of reordered packets before discarding the previous phase's keys after
+// an update.
+type updatingKeyPair struct {
+ phase uint8 // current key phase (r.pkt[0], w.pkt[0])
+ updating bool
+ authFailures int64 // total packet unprotect failures
+ minSent packetNumber // min packet number sent since entering the updating state
+ minReceived packetNumber // min packet number received in the next phase
+ updateAfter packetNumber // packet number after which to initiate key update
+ r, w updatingKeys
+}
+
+func (k *updatingKeyPair) init() {
+ // 1-RTT packets until the first key update.
+ //
+ // We perform the first key update early in the connection so a peer
+ // which does not support key updates will fail rapidly,
+ // rather than after the connection has been long established.
+ k.updateAfter = 1000
+}
+
+func (k *updatingKeyPair) canRead() bool {
+ return k.r.hdr.hp != nil
+}
+
+func (k *updatingKeyPair) canWrite() bool {
+ return k.w.hdr.hp != nil
+}
+
+// handleAckFor finishes a key update after receiving an ACK for a packet in the next phase.
+func (k *updatingKeyPair) handleAckFor(pnum packetNumber) {
+ if k.updating && pnum >= k.minSent {
+ k.updating = false
+ k.phase ^= keyPhaseBit
+ k.r.update()
+ k.w.update()
+ }
+}
+
+// needAckEliciting reports whether we should send an ack-eliciting packet in the next phase.
+// The first packet sent in a phase is ack-eliciting, since the peer must acknowledge a
+// packet in the new phase for us to finish the update.
+func (k *updatingKeyPair) needAckEliciting() bool {
+ return k.updating && k.minSent == maxPacketNumber
+}
+
+// protect applies packet protection to a packet.
+// Parameters and returns are as for fixedKeyPair.protect.
+func (k *updatingKeyPair) protect(hdr, pay []byte, pnumOff int, pnum packetNumber) []byte {
+ var pkt []byte
+ if k.updating {
+ hdr[0] |= k.phase ^ keyPhaseBit
+ pkt = k.w.pkt[1].protect(hdr, pay, pnum)
+ k.minSent = min(pnum, k.minSent)
+ } else {
+ hdr[0] |= k.phase
+ pkt = k.w.pkt[0].protect(hdr, pay, pnum)
+ if pnum >= k.updateAfter {
+ // Initiate a key update, starting with the next packet we send.
+ //
+ // We do this after protecting the current packet
+ // to allow Conn.appendFrames to ensure that the first packet sent
+ // in the new phase is ack-eliciting.
+ k.updating = true
+ k.minSent = maxPacketNumber
+ k.minReceived = maxPacketNumber
+ // The lowest confidentiality limit for a supported AEAD is 2^23 packets.
+ // https://www.rfc-editor.org/rfc/rfc9001#section-6.6-5
+ //
+ // Schedule our next update for half that.
+ k.updateAfter += (1 << 22)
+ }
+ }
+ k.w.hdr.protect(pkt, pnumOff)
+ return pkt
+}
+
+// unprotect removes packet protection from a packet.
+// Parameters and returns are as for fixedKeyPair.unprotect.
+func (k *updatingKeyPair) unprotect(pkt []byte, pnumOff int, pnumMax packetNumber) (pay []byte, pnum packetNumber, err error) {
+ hdr, pay, pnum, err := k.r.hdr.unprotect(pkt, pnumOff, pnumMax)
+ if err != nil {
+ return nil, 0, err
+ }
+ // To avoid timing signals that might indicate the key phase bit is invalid,
+ // we always attempt to unprotect the packet with one key.
+ //
+ // If the key phase bit matches and the packet number doesn't come after
+ // the start of an in-progress update, use the current phase.
+ // Otherwise, use the next phase.
+ if hdr[0]&keyPhaseBit == k.phase && (!k.updating || pnum < k.minReceived) {
+ pay, err = k.r.pkt[0].unprotect(hdr, pay, pnum)
+ } else {
+ pay, err = k.r.pkt[1].unprotect(hdr, pay, pnum)
+ if err == nil {
+ if !k.updating {
+ // The peer has initiated a key update.
+ k.updating = true
+ k.minSent = maxPacketNumber
+ k.minReceived = pnum
+ } else {
+ k.minReceived = min(pnum, k.minReceived)
+ }
+ }
+ }
+ if err != nil {
+ k.authFailures++
+ if k.authFailures >= aeadIntegrityLimit(k.r.suite) {
+ return nil, 0, localTransportError{code: errAEADLimitReached}
+ }
+ return nil, 0, err
+ }
+ return pay, pnum, nil
+}
+
+// aeadIntegrityLimit returns the integrity limit for an AEAD:
+// The maximum number of received packets that may fail authentication
+// before closing the connection.
+//
+// https://www.rfc-editor.org/rfc/rfc9001#section-6.6-4
+func aeadIntegrityLimit(suite uint16) int64 {
+ switch suite {
+ case tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384:
+ return 1 << 52
+ case tls.TLS_CHACHA20_POLY1305_SHA256:
+ return 1 << 36
+ default:
+ panic("BUG: unknown cipher suite")
+ }
+}
+
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.2-2
+var initialSalt = []byte{0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a}
+
+// initialKeys returns the keys used to protect Initial packets.
+//
+// The Initial packet keys are derived from the Destination Connection ID
+// field in the client's first Initial packet.
+//
+// https://www.rfc-editor.org/rfc/rfc9001#section-5.2
+func initialKeys(cid []byte, side connSide) fixedKeyPair {
+ initialSecret := hkdf.Extract(sha256.New, cid, initialSalt)
+ var clientKeys fixedKeys
+ clientSecret := hkdfExpandLabel(sha256.New, initialSecret, "client in", nil, sha256.Size)
+ clientKeys.init(tls.TLS_AES_128_GCM_SHA256, clientSecret)
+ var serverKeys fixedKeys
+ serverSecret := hkdfExpandLabel(sha256.New, initialSecret, "server in", nil, sha256.Size)
+ serverKeys.init(tls.TLS_AES_128_GCM_SHA256, serverSecret)
+ if side == clientSide {
+ return fixedKeyPair{r: serverKeys, w: clientKeys}
+ } else {
+ return fixedKeyPair{w: serverKeys, r: clientKeys}
+ }
+}
+
+// checkCipherSuite returns an error if suite is not a supported cipher suite.
+func checkCipherSuite(suite uint16) error {
+ switch suite {
+ case tls.TLS_AES_128_GCM_SHA256:
+ case tls.TLS_AES_256_GCM_SHA384:
+ case tls.TLS_CHACHA20_POLY1305_SHA256:
+ default:
+ return errors.New("invalid cipher suite")
+ }
+ return nil
+}
+
+func hashForSuite(suite uint16) (h crypto.Hash, keySize int) {
+ switch suite {
+ case tls.TLS_AES_128_GCM_SHA256:
+ return crypto.SHA256, 128 / 8
+ case tls.TLS_AES_256_GCM_SHA384:
+ return crypto.SHA384, 256 / 8
+ case tls.TLS_CHACHA20_POLY1305_SHA256:
+ return crypto.SHA256, chacha20.KeySize
+ default:
+ panic("BUG: unknown cipher suite")
+ }
+}
+
+// hdkfExpandLabel implements HKDF-Expand-Label from RFC 8446, Section 7.1.
+//
+// Copied from crypto/tls/key_schedule.go.
+func hkdfExpandLabel(hash func() hash.Hash, secret []byte, label string, context []byte, length int) []byte {
+ var hkdfLabel cryptobyte.Builder
+ hkdfLabel.AddUint16(uint16(length))
+ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes([]byte("tls13 "))
+ b.AddBytes([]byte(label))
+ })
+ hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+ b.AddBytes(context)
+ })
+ out := make([]byte, length)
+ n, err := hkdf.Expand(hash, secret, hkdfLabel.BytesOrPanic()).Read(out)
+ if err != nil || n != length {
+ panic("quic: HKDF-Expand-Label invocation failed unexpectedly")
+ }
+ return out
+}
diff --git a/internal/quic/packet_protection_test.go b/quic/packet_protection_test.go
similarity index 96%
rename from internal/quic/packet_protection_test.go
rename to quic/packet_protection_test.go
index 6495360a3..1fe130731 100644
--- a/internal/quic/packet_protection_test.go
+++ b/quic/packet_protection_test.go
@@ -16,10 +16,11 @@ func TestPacketProtection(t *testing.T) {
// Test cases from:
// https://www.rfc-editor.org/rfc/rfc9001#section-appendix.a
cid := unhex(`8394c8f03e515708`)
- initialClientKeys, initialServerKeys := initialKeys(cid)
+ k := initialKeys(cid, clientSide)
+ initialClientKeys, initialServerKeys := k.w, k.r
for _, test := range []struct {
name string
- k keys
+ k fixedKeys
pnum packetNumber
hdr []byte
pay []byte
@@ -103,15 +104,13 @@ func TestPacketProtection(t *testing.T) {
`),
}, {
name: "ChaCha20_Poly1305 Short Header",
- k: func() keys {
+ k: func() fixedKeys {
secret := unhex(`
9ac312a7f877468ebe69422748ad00a1
5443f18203a07d6060f688f30f21632b
`)
- k, err := newKeys(tls.TLS_CHACHA20_POLY1305_SHA256, secret)
- if err != nil {
- t.Fatal(err)
- }
+ var k fixedKeys
+ k.init(tls.TLS_CHACHA20_POLY1305_SHA256, secret)
return k
}(),
pnum: 654360564,
diff --git a/quic/packet_test.go b/quic/packet_test.go
new file mode 100644
index 000000000..58c584e16
--- /dev/null
+++ b/quic/packet_test.go
@@ -0,0 +1,247 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestPacketHeader(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ packet []byte
+ isLongHeader bool
+ packetType packetType
+ dstConnID []byte
+ }{{
+ // Initial packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.1
+ // (truncated)
+ name: "rfc9001_a1",
+ packet: unhex(`
+ c000000001088394c8f03e5157080000 449e7b9aec34d1b1c98dd7689fb8ec11
+ `),
+ isLongHeader: true,
+ packetType: packetTypeInitial,
+ dstConnID: unhex(`8394c8f03e515708`),
+ }, {
+ // Initial packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.3
+ // (truncated)
+ name: "rfc9001_a3",
+ packet: unhex(`
+ cf000000010008f067a5502a4262b500 4075c0d95a482cd0991cd25b0aac406a
+ `),
+ isLongHeader: true,
+ packetType: packetTypeInitial,
+ dstConnID: []byte{},
+ }, {
+ // Retry packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.4
+ name: "rfc9001_a4",
+ packet: unhex(`
+ ff000000010008f067a5502a4262b574 6f6b656e04a265ba2eff4d829058fb3f
+ 0f2496ba
+ `),
+ isLongHeader: true,
+ packetType: packetTypeRetry,
+ dstConnID: []byte{},
+ }, {
+ // Short header packet from https://www.rfc-editor.org/rfc/rfc9001#section-a.5
+ name: "rfc9001_a5",
+ packet: unhex(`
+ 4cfe4189655e5cd55c41f69080575d7999c25a5bfb
+ `),
+ isLongHeader: false,
+ packetType: packetType1RTT,
+ dstConnID: unhex(`fe4189655e5cd55c`),
+ }, {
+ // Version Negotiation packet.
+ name: "version_negotiation",
+ packet: unhex(`
+ 80 00000000 01ff0001020304
+ `),
+ isLongHeader: true,
+ packetType: packetTypeVersionNegotiation,
+ dstConnID: []byte{0xff},
+ }, {
+ // Too-short packet.
+ name: "truncated_after_connid_length",
+ packet: unhex(`
+ cf0000000105
+ `),
+ isLongHeader: true,
+ packetType: packetTypeInitial,
+ dstConnID: nil,
+ }, {
+ // Too-short packet.
+ name: "truncated_after_version",
+ packet: unhex(`
+ cf00000001
+ `),
+ isLongHeader: true,
+ packetType: packetTypeInitial,
+ dstConnID: nil,
+ }, {
+ // Much too short packet.
+ name: "truncated_in_version",
+ packet: unhex(`
+ cf000000
+ `),
+ isLongHeader: true,
+ packetType: packetTypeInvalid,
+ dstConnID: nil,
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ if got, want := isLongHeader(test.packet[0]), test.isLongHeader; got != want {
+ t.Errorf("packet %x:\nisLongHeader(packet) = %v, want %v", test.packet, got, want)
+ }
+ if got, want := getPacketType(test.packet), test.packetType; got != want {
+ t.Errorf("packet %x:\ngetPacketType(packet) = %v, want %v", test.packet, got, want)
+ }
+ gotConnID, gotOK := dstConnIDForDatagram(test.packet)
+ wantConnID, wantOK := test.dstConnID, test.dstConnID != nil
+ if !bytes.Equal(gotConnID, wantConnID) || gotOK != wantOK {
+ t.Errorf("packet %x:\ndstConnIDForDatagram(packet) = {%x}, %v; want {%x}, %v", test.packet, gotConnID, gotOK, wantConnID, wantOK)
+ }
+ })
+ }
+}
+
+func TestEncodeDecodeVersionNegotiation(t *testing.T) {
+ dstConnID := []byte("this is a very long destination connection id")
+ srcConnID := []byte("this is a very long source connection id")
+ versions := []uint32{1, 0xffffffff}
+ got := appendVersionNegotiation([]byte{}, dstConnID, srcConnID, versions...)
+ want := bytes.Join([][]byte{{
+ 0b1100_0000, // header byte
+ 0, 0, 0, 0, // Version
+ byte(len(dstConnID)),
+ }, dstConnID, {
+ byte(len(srcConnID)),
+ }, srcConnID, {
+ 0x00, 0x00, 0x00, 0x01,
+ 0xff, 0xff, 0xff, 0xff,
+ }}, nil)
+ if !bytes.Equal(got, want) {
+ t.Fatalf("appendVersionNegotiation(nil, %x, %x, %v):\ngot %x\nwant %x",
+ dstConnID, srcConnID, versions, got, want)
+ }
+ gotDst, gotSrc, gotVersionBytes := parseVersionNegotiation(got)
+ if got, want := gotDst, dstConnID; !bytes.Equal(got, want) {
+ t.Errorf("parseVersionNegotiation: got dstConnID = %x, want %x", got, want)
+ }
+ if got, want := gotSrc, srcConnID; !bytes.Equal(got, want) {
+ t.Errorf("parseVersionNegotiation: got srcConnID = %x, want %x", got, want)
+ }
+ var gotVersions []uint32
+ for len(gotVersionBytes) >= 4 {
+ gotVersions = append(gotVersions, binary.BigEndian.Uint32(gotVersionBytes))
+ gotVersionBytes = gotVersionBytes[4:]
+ }
+ if got, want := gotVersions, versions; !reflect.DeepEqual(got, want) {
+ t.Errorf("parseVersionNegotiation: got versions = %v, want %v", got, want)
+ }
+}
+
+func TestParseGenericLongHeaderPacket(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ packet []byte
+ version uint32
+ dstConnID []byte
+ srcConnID []byte
+ data []byte
+ }{{
+ name: "long header packet",
+ packet: unhex(`
+ 80 01020304 04a1a2a3a4 05b1b2b3b4b5 c1
+ `),
+ version: 0x01020304,
+ dstConnID: unhex(`a1a2a3a4`),
+ srcConnID: unhex(`b1b2b3b4b5`),
+ data: unhex(`c1`),
+ }, {
+ name: "zero everything",
+ packet: unhex(`
+ 80 00000000 00 00
+ `),
+ version: 0,
+ dstConnID: []byte{},
+ srcConnID: []byte{},
+ data: []byte{},
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ p, ok := parseGenericLongHeaderPacket(test.packet)
+ if !ok {
+ t.Fatalf("parseGenericLongHeaderPacket() = _, false; want true")
+ }
+ if got, want := p.version, test.version; got != want {
+ t.Errorf("version = %v, want %v", got, want)
+ }
+ if got, want := p.dstConnID, test.dstConnID; !bytes.Equal(got, want) {
+ t.Errorf("Destination Connection ID = {%x}, want {%x}", got, want)
+ }
+ if got, want := p.srcConnID, test.srcConnID; !bytes.Equal(got, want) {
+ t.Errorf("Source Connection ID = {%x}, want {%x}", got, want)
+ }
+ if got, want := p.data, test.data; !bytes.Equal(got, want) {
+ t.Errorf("Data = {%x}, want {%x}", got, want)
+ }
+ })
+ }
+}
+
+func TestParseGenericLongHeaderPacketErrors(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ packet []byte
+ }{{
+ name: "short header packet",
+ packet: unhex(`
+ 00 01020304 04a1a2a3a4 05b1b2b3b4b5 c1
+ `),
+ }, {
+ name: "packet too short",
+ packet: unhex(`
+ 80 000000
+ `),
+ }, {
+ name: "destination id too long",
+ packet: unhex(`
+ 80 00000000 02 00
+ `),
+ }, {
+ name: "source id too long",
+ packet: unhex(`
+ 80 00000000 00 01
+ `),
+ }} {
+ t.Run(test.name, func(t *testing.T) {
+ _, ok := parseGenericLongHeaderPacket(test.packet)
+ if ok {
+ t.Fatalf("parseGenericLongHeaderPacket() = _, true; want false")
+ }
+ })
+ }
+}
+
+func unhex(s string) []byte {
+ b, err := hex.DecodeString(strings.Map(func(c rune) rune {
+ switch c {
+ case ' ', '\t', '\n':
+ return -1
+ }
+ return c
+ }, s))
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
diff --git a/internal/quic/packet_writer.go b/quic/packet_writer.go
similarity index 92%
rename from internal/quic/packet_writer.go
rename to quic/packet_writer.go
index 97987e0c2..e4d71e622 100644
--- a/internal/quic/packet_writer.go
+++ b/quic/packet_writer.go
@@ -47,6 +47,11 @@ func (w *packetWriter) datagram() []byte {
return w.b
}
+// packet returns the size of the current packet.
+func (w *packetWriter) packetLen() int {
+ return len(w.b[w.pktOff:]) + aeadOverhead
+}
+
// payload returns the payload of the current packet.
func (w *packetWriter) payload() []byte {
return w.b[w.payOff:]
@@ -100,7 +105,7 @@ func (w *packetWriter) startProtectedLongHeaderPacket(pnumMaxAcked packetNumber,
// finishProtectedLongHeaderPacket finishes writing an Initial, 0-RTT, or Handshake packet,
// canceling the packet if it contains no payload.
// It returns a sentPacket describing the packet, or nil if no packet was written.
-func (w *packetWriter) finishProtectedLongHeaderPacket(pnumMaxAcked packetNumber, k keys, p longPacket) *sentPacket {
+func (w *packetWriter) finishProtectedLongHeaderPacket(pnumMaxAcked packetNumber, k fixedKeys, p longPacket) *sentPacket {
if len(w.b) == w.payOff {
// The payload is empty, so just abandon the packet.
w.b = w.b[:w.pktOff]
@@ -135,7 +140,8 @@ func (w *packetWriter) finishProtectedLongHeaderPacket(pnumMaxAcked packetNumber
pnumOff := len(hdr)
hdr = appendPacketNumber(hdr, p.num, pnumMaxAcked)
- return w.protect(hdr[w.pktOff:], p.num, pnumOff, k)
+ k.protect(hdr[w.pktOff:], w.b[len(hdr):], pnumOff-w.pktOff, p.num)
+ return w.finish(p.ptype, p.num)
}
// start1RTTPacket starts writing a 1-RTT (short header) packet.
@@ -162,14 +168,13 @@ func (w *packetWriter) start1RTTPacket(pnum, pnumMaxAcked packetNumber, dstConnI
// finish1RTTPacket finishes writing a 1-RTT packet,
// canceling the packet if it contains no payload.
// It returns a sentPacket describing the packet, or nil if no packet was written.
-func (w *packetWriter) finish1RTTPacket(pnum, pnumMaxAcked packetNumber, dstConnID []byte, k keys) *sentPacket {
+func (w *packetWriter) finish1RTTPacket(pnum, pnumMaxAcked packetNumber, dstConnID []byte, k *updatingKeyPair) *sentPacket {
if len(w.b) == w.payOff {
// The payload is empty, so just abandon the packet.
w.b = w.b[:w.pktOff]
return nil
}
// TODO: Spin
- // TODO: Key phase
pnumLen := packetNumberLength(pnum, pnumMaxAcked)
hdr := w.b[:w.pktOff]
hdr = append(hdr, 0x40|byte(pnumLen-1))
@@ -177,7 +182,8 @@ func (w *packetWriter) finish1RTTPacket(pnum, pnumMaxAcked packetNumber, dstConn
pnumOff := len(hdr)
hdr = appendPacketNumber(hdr, pnum, pnumMaxAcked)
w.padPacketLength(pnumLen)
- return w.protect(hdr[w.pktOff:], pnum, pnumOff, k)
+ k.protect(hdr[w.pktOff:], w.b[len(hdr):], pnumOff-w.pktOff, pnum)
+ return w.finish(packetType1RTT, pnum)
}
// padPacketLength pads out the payload of the current packet to the minimum size,
@@ -197,11 +203,11 @@ func (w *packetWriter) padPacketLength(pnumLen int) int {
return plen
}
-// protect applies packet protection and finishes the current packet.
-func (w *packetWriter) protect(hdr []byte, pnum packetNumber, pnumOff int, k keys) *sentPacket {
- k.protect(hdr, w.b[w.pktOff+len(hdr):], pnumOff-w.pktOff, pnum)
+// finish finishes the current packet after protection is applied.
+func (w *packetWriter) finish(ptype packetType, pnum packetNumber) *sentPacket {
w.b = w.b[:len(w.b)+aeadOverhead]
w.sent.size = len(w.b) - w.pktOff
+ w.sent.ptype = ptype
w.sent.num = pnum
sent := w.sent
w.sent = nil
@@ -237,7 +243,7 @@ func (w *packetWriter) appendPingFrame() (added bool) {
return false
}
w.b = append(w.b, frameTypePing)
- w.sent.appendAckElicitingFrame(frameTypePing)
+ w.sent.markAckEliciting() // no need to record the frame itself
return true
}
@@ -379,11 +385,7 @@ func (w *packetWriter) appendStreamFrame(id streamID, off int64, size int, fin b
w.b = appendVarint(w.b, uint64(size))
start := len(w.b)
w.b = w.b[:start+size]
- if fin {
- w.sent.appendAckElicitingFrame(frameTypeStreamBase | streamFinBit)
- } else {
- w.sent.appendAckElicitingFrame(frameTypeStreamBase)
- }
+ w.sent.appendAckElicitingFrame(typ & (frameTypeStreamBase | streamFinBit))
w.sent.appendInt(uint64(id))
w.sent.appendOffAndSize(off, size)
return w.b[start:][:size], true
@@ -479,33 +481,34 @@ func (w *packetWriter) appendNewConnectionIDFrame(seq, retirePriorTo int64, conn
return true
}
-func (w *packetWriter) appendRetireConnectionIDFrame(seq uint64) (added bool) {
- if w.avail() < 1+sizeVarint(seq) {
+func (w *packetWriter) appendRetireConnectionIDFrame(seq int64) (added bool) {
+ if w.avail() < 1+sizeVarint(uint64(seq)) {
return false
}
w.b = append(w.b, frameTypeRetireConnectionID)
- w.b = appendVarint(w.b, seq)
+ w.b = appendVarint(w.b, uint64(seq))
w.sent.appendAckElicitingFrame(frameTypeRetireConnectionID)
+ w.sent.appendInt(uint64(seq))
return true
}
-func (w *packetWriter) appendPathChallengeFrame(data uint64) (added bool) {
+func (w *packetWriter) appendPathChallengeFrame(data pathChallengeData) (added bool) {
if w.avail() < 1+8 {
return false
}
w.b = append(w.b, frameTypePathChallenge)
- w.b = binary.BigEndian.AppendUint64(w.b, data)
- w.sent.appendAckElicitingFrame(frameTypePathChallenge)
+ w.b = append(w.b, data[:]...)
+ w.sent.markAckEliciting() // no need to record the frame itself
return true
}
-func (w *packetWriter) appendPathResponseFrame(data uint64) (added bool) {
+func (w *packetWriter) appendPathResponseFrame(data pathChallengeData) (added bool) {
if w.avail() < 1+8 {
return false
}
w.b = append(w.b, frameTypePathResponse)
- w.b = binary.BigEndian.AppendUint64(w.b, data)
- w.sent.appendAckElicitingFrame(frameTypePathResponse)
+ w.b = append(w.b, data[:]...)
+ w.sent.markAckEliciting() // no need to record the frame itself
return true
}
diff --git a/quic/path.go b/quic/path.go
new file mode 100644
index 000000000..8c237dd45
--- /dev/null
+++ b/quic/path.go
@@ -0,0 +1,89 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import "time"
+
+type pathState struct {
+ // Response to a peer's PATH_CHALLENGE.
+ // This is not a sentVal, because we don't resend lost PATH_RESPONSE frames.
+ // We only track the most recent PATH_CHALLENGE.
+ // If the peer sends a second PATH_CHALLENGE before we respond to the first,
+ // we'll drop the first response.
+ sendPathResponse pathResponseType
+ data pathChallengeData
+}
+
+// pathChallengeData is data carried in a PATH_CHALLENGE or PATH_RESPONSE frame.
+type pathChallengeData [64 / 8]byte
+
+type pathResponseType uint8
+
+const (
+ pathResponseNotNeeded = pathResponseType(iota)
+ pathResponseSmall // send PATH_RESPONSE, do not expand datagram
+ pathResponseExpanded // send PATH_RESPONSE, expand datagram to 1200 bytes
+)
+
+func (c *Conn) handlePathChallenge(_ time.Time, dgram *datagram, data pathChallengeData) {
+ // A PATH_RESPONSE is sent in a datagram expanded to 1200 bytes,
+ // except when this would exceed the anti-amplification limit.
+ //
+ // Rather than maintaining anti-amplification state for each path
+ // we may be sending a PATH_RESPONSE on, follow the following heuristic:
+ //
+ // If we receive a PATH_CHALLENGE in an expanded datagram,
+ // respond with an expanded datagram.
+ //
+ // If we receive a PATH_CHALLENGE in a non-expanded datagram,
+ // then the peer is presumably blocked by its own anti-amplification limit.
+ // Respond with a non-expanded datagram. Receiving this PATH_RESPONSE
+ // will validate the path to the peer, remove its anti-amplification limit,
+ // and permit it to send a followup PATH_CHALLENGE in an expanded datagram.
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-8.2.1
+ if len(dgram.b) >= smallestMaxDatagramSize {
+ c.path.sendPathResponse = pathResponseExpanded
+ } else {
+ c.path.sendPathResponse = pathResponseSmall
+ }
+ c.path.data = data
+}
+
+func (c *Conn) handlePathResponse(now time.Time, _ pathChallengeData) {
+ // "If the content of a PATH_RESPONSE frame does not match the content of
+ // a PATH_CHALLENGE frame previously sent by the endpoint,
+ // the endpoint MAY generate a connection error of type PROTOCOL_VIOLATION."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-19.18-4
+ //
+ // We never send PATH_CHALLENGE frames.
+ c.abort(now, localTransportError{
+ code: errProtocolViolation,
+ reason: "PATH_RESPONSE received when no PATH_CHALLENGE sent",
+ })
+}
+
+// appendPathFrames appends path validation related frames to the current packet.
+// If the return value pad is true, then the packet should be padded to 1200 bytes.
+func (c *Conn) appendPathFrames() (pad, ok bool) {
+ if c.path.sendPathResponse == pathResponseNotNeeded {
+ return pad, true
+ }
+ // We're required to send the PATH_RESPONSE on the path where the
+ // PATH_CHALLENGE was received (RFC 9000, Section 8.2.2).
+ //
+ // At the moment, we don't support path migration and reject packets if
+ // the peer changes its source address, so just sending the PATH_RESPONSE
+ // in a regular datagram is fine.
+ if !c.w.appendPathResponseFrame(c.path.data) {
+ return pad, false
+ }
+ if c.path.sendPathResponse == pathResponseExpanded {
+ pad = true
+ }
+ c.path.sendPathResponse = pathResponseNotNeeded
+ return pad, true
+}
diff --git a/quic/path_test.go b/quic/path_test.go
new file mode 100644
index 000000000..a309ed14b
--- /dev/null
+++ b/quic/path_test.go
@@ -0,0 +1,66 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "testing"
+)
+
+func TestPathChallengeReceived(t *testing.T) {
+ for _, test := range []struct {
+ name string
+ padTo int
+ wantPadding int
+ }{{
+ name: "unexpanded",
+ padTo: 0,
+ wantPadding: 0,
+ }, {
+ name: "expanded",
+ padTo: 1200,
+ wantPadding: 1200,
+ }} {
+ // "The recipient of [a PATH_CHALLENGE] frame MUST generate
+ // a PATH_RESPONSE frame [...] containing the same Data value."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-19.17-7
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ data := pathChallengeData{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}
+ tc.writeFrames(packetType1RTT, debugFramePathChallenge{
+ data: data,
+ }, debugFramePadding{
+ to: test.padTo,
+ })
+ tc.wantFrame("response to PATH_CHALLENGE",
+ packetType1RTT, debugFramePathResponse{
+ data: data,
+ })
+ if got, want := tc.lastDatagram.paddedSize, test.wantPadding; got != want {
+ t.Errorf("PATH_RESPONSE expanded to %v bytes, want %v", got, want)
+ }
+ tc.wantIdle("connection is idle")
+ }
+}
+
+func TestPathResponseMismatchReceived(t *testing.T) {
+ // "If the content of a PATH_RESPONSE frame does not match the content of
+ // a PATH_CHALLENGE frame previously sent by the endpoint,
+ // the endpoint MAY generate a connection error of type PROTOCOL_VIOLATION."
+ // https://www.rfc-editor.org/rfc/rfc9000.html#section-19.18-4
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+ tc.ignoreFrame(frameTypeAck)
+ tc.writeFrames(packetType1RTT, debugFramePathResponse{
+ data: pathChallengeData{},
+ })
+ tc.wantFrame("invalid PATH_RESPONSE causes the connection to close",
+ packetType1RTT, debugFrameConnectionCloseTransport{
+ code: errProtocolViolation,
+ },
+ )
+}
diff --git a/quic/ping.go b/quic/ping.go
new file mode 100644
index 000000000..3e7d9c51b
--- /dev/null
+++ b/quic/ping.go
@@ -0,0 +1,16 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import "time"
+
+func (c *Conn) ping(space numberSpace) {
+ c.sendMsg(func(now time.Time, c *Conn) {
+ c.testSendPing.setUnsent()
+ c.testSendPingSpace = space
+ })
+}
diff --git a/quic/ping_test.go b/quic/ping_test.go
new file mode 100644
index 000000000..a8fdf2567
--- /dev/null
+++ b/quic/ping_test.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import "testing"
+
+func TestPing(t *testing.T) {
+ tc := newTestConn(t, clientSide)
+ tc.handshake()
+
+ tc.conn.ping(appDataSpace)
+ tc.wantFrame("connection should send a PING frame",
+ packetType1RTT, debugFramePing{})
+
+ tc.advanceToTimer()
+ tc.wantFrame("on PTO, connection should send another PING frame",
+ packetType1RTT, debugFramePing{})
+
+ tc.wantIdle("after sending PTO probe, no additional frames to send")
+}
+
+func TestAck(t *testing.T) {
+ tc := newTestConn(t, serverSide)
+ tc.handshake()
+
+ // Send two packets, to trigger an immediate ACK.
+ tc.writeFrames(packetType1RTT,
+ debugFramePing{},
+ )
+ tc.writeFrames(packetType1RTT,
+ debugFramePing{},
+ )
+ tc.wantFrame("connection should respond to ack-eliciting packet with an ACK frame",
+ packetType1RTT,
+ debugFrameAck{
+ ranges: []i64range[packetNumber]{{0, 4}},
+ },
+ )
+}
diff --git a/quic/pipe.go b/quic/pipe.go
new file mode 100644
index 000000000..75cf76db2
--- /dev/null
+++ b/quic/pipe.go
@@ -0,0 +1,175 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "sync"
+)
+
+// A pipe is a byte buffer used in implementing streams.
+//
+// A pipe contains a window of stream data.
+// Random access reads and writes are supported within the window.
+// Writing past the end of the window extends it.
+// Data may be discarded from the start of the pipe, advancing the window.
+type pipe struct {
+ start int64 // stream position of first stored byte
+ end int64 // stream position just past the last stored byte
+ head *pipebuf // if non-nil, then head.off + len(head.b) > start
+ tail *pipebuf // if non-nil, then tail.off + len(tail.b) == end
+}
+
+type pipebuf struct {
+ off int64 // stream position of b[0]
+ b []byte
+ next *pipebuf
+}
+
+func (pb *pipebuf) end() int64 {
+ return pb.off + int64(len(pb.b))
+}
+
+var pipebufPool = sync.Pool{
+ New: func() any {
+ return &pipebuf{
+ b: make([]byte, 4096),
+ }
+ },
+}
+
+func newPipebuf() *pipebuf {
+ return pipebufPool.Get().(*pipebuf)
+}
+
+func (b *pipebuf) recycle() {
+ b.off = 0
+ b.next = nil
+ pipebufPool.Put(b)
+}
+
+// writeAt writes len(b) bytes to the pipe at offset off.
+//
+// Writes to offsets before p.start are discarded.
+// Writes to offsets after p.end extend the pipe window.
+func (p *pipe) writeAt(b []byte, off int64) {
+ end := off + int64(len(b))
+ if end > p.end {
+ p.end = end
+ } else if end <= p.start {
+ return
+ }
+
+ if off < p.start {
+ // Discard the portion of b which falls before p.start.
+ trim := p.start - off
+ b = b[trim:]
+ off = p.start
+ }
+
+ if p.head == nil {
+ p.head = newPipebuf()
+ p.head.off = p.start
+ p.tail = p.head
+ }
+ pb := p.head
+ if off >= p.tail.off {
+ // Common case: Writing past the end of the pipe.
+ pb = p.tail
+ }
+ for {
+ pboff := off - pb.off
+ if pboff < int64(len(pb.b)) {
+ n := copy(pb.b[pboff:], b)
+ if n == len(b) {
+ return
+ }
+ off += int64(n)
+ b = b[n:]
+ }
+ if pb.next == nil {
+ pb.next = newPipebuf()
+ pb.next.off = pb.off + int64(len(pb.b))
+ p.tail = pb.next
+ }
+ pb = pb.next
+ }
+}
+
+// copy copies len(b) bytes into b starting from off.
+// The pipe must contain [off, off+len(b)).
+func (p *pipe) copy(off int64, b []byte) {
+ dst := b[:0]
+ p.read(off, len(b), func(c []byte) error {
+ dst = append(dst, c...)
+ return nil
+ })
+}
+
+// read calls f with the data in [off, off+n)
+// The data may be provided sequentially across multiple calls to f.
+// Note that read (unlike an io.Reader) does not consume the read data.
+func (p *pipe) read(off int64, n int, f func([]byte) error) error {
+ if off < p.start {
+ panic("invalid read range")
+ }
+ for pb := p.head; pb != nil && n > 0; pb = pb.next {
+ if off >= pb.end() {
+ continue
+ }
+ b := pb.b[off-pb.off:]
+ if len(b) > n {
+ b = b[:n]
+ }
+ off += int64(len(b))
+ n -= len(b)
+ if err := f(b); err != nil {
+ return err
+ }
+ }
+ if n > 0 {
+ panic("invalid read range")
+ }
+ return nil
+}
+
+// peek returns a reference to up to n bytes of internal data buffer, starting at p.start.
+// The returned slice is valid until the next call to discardBefore.
+// The length of the returned slice will be in the range [0,n].
+func (p *pipe) peek(n int64) []byte {
+ pb := p.head
+ if pb == nil {
+ return nil
+ }
+ b := pb.b[p.start-pb.off:]
+ return b[:min(int64(len(b)), n)]
+}
+
+// availableBuffer returns the available contiguous, allocated buffer space
+// following the pipe window.
+//
+// This is used by the stream write fast path, which makes multiple writes into the pipe buffer
+// without a lock, and then adjusts p.end at a later time with a lock held.
+func (p *pipe) availableBuffer() []byte {
+ if p.tail == nil {
+ return nil
+ }
+ return p.tail.b[p.end-p.tail.off:]
+}
+
+// discardBefore discards all data prior to off.
+func (p *pipe) discardBefore(off int64) {
+ for p.head != nil && p.head.end() < off {
+ head := p.head
+ p.head = p.head.next
+ head.recycle()
+ }
+ if p.head == nil {
+ p.tail = nil
+ }
+ p.start = off
+ p.end = max(p.end, off)
+}
diff --git a/quic/pipe_test.go b/quic/pipe_test.go
new file mode 100644
index 000000000..bcb3a8bc0
--- /dev/null
+++ b/quic/pipe_test.go
@@ -0,0 +1,104 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "bytes"
+ "math/rand"
+ "testing"
+)
+
+func TestPipeWrites(t *testing.T) {
+ type writeOp struct {
+ start, end int64
+ }
+ type discardBeforeOp struct {
+ off int64
+ }
+ type op any
+ src := make([]byte, 65536)
+ rand.New(rand.NewSource(0)).Read(src)
+ for _, test := range []struct {
+ desc string
+ ops []op
+ }{{
+ desc: "sequential writes",
+ ops: []op{
+ writeOp{0, 1024},
+ writeOp{1024, 4096},
+ writeOp{4096, 65536},
+ },
+ }, {
+ desc: "disordered overlapping writes",
+ ops: []op{
+ writeOp{2000, 8000},
+ writeOp{0, 3000},
+ writeOp{7000, 12000},
+ },
+ }, {
+ desc: "write to discarded region",
+ ops: []op{
+ writeOp{0, 65536},
+ discardBeforeOp{32768},
+ writeOp{0, 1000},
+ writeOp{3000, 5000},
+ writeOp{0, 32768},
+ },
+ }, {
+ desc: "write overlaps discarded region",
+ ops: []op{
+ discardBeforeOp{10000},
+ writeOp{0, 20000},
+ },
+ }, {
+ desc: "discard everything",
+ ops: []op{
+ writeOp{0, 10000},
+ discardBeforeOp{10000},
+ writeOp{10000, 20000},
+ },
+ }, {
+ desc: "discard before writing",
+ ops: []op{
+ discardBeforeOp{1000},
+ writeOp{0, 1},
+ },
+ }} {
+ var p pipe
+ var wantset rangeset[int64]
+ var wantStart, wantEnd int64
+ for i, o := range test.ops {
+ switch o := o.(type) {
+ case writeOp:
+ p.writeAt(src[o.start:o.end], o.start)
+ wantset.add(o.start, o.end)
+ wantset.sub(0, wantStart)
+ if o.end > wantEnd {
+ wantEnd = o.end
+ }
+ case discardBeforeOp:
+ p.discardBefore(o.off)
+ wantset.sub(0, o.off)
+ wantStart = o.off
+ if o.off > wantEnd {
+ wantEnd = o.off
+ }
+ }
+ if p.start != wantStart || p.end != wantEnd {
+ t.Errorf("%v: after %#v p contains [%v,%v), want [%v,%v)", test.desc, test.ops[:i+1], p.start, p.end, wantStart, wantEnd)
+ }
+ for _, r := range wantset {
+ want := src[r.start:][:r.size()]
+ got := make([]byte, r.size())
+ p.copy(r.start, got)
+ if !bytes.Equal(got, want) {
+ t.Errorf("%v after %#v, mismatch in data in %v", test.desc, test.ops[:i+1], r)
+ }
+ }
+ }
+ }
+}
diff --git a/quic/qlog.go b/quic/qlog.go
new file mode 100644
index 000000000..36831252c
--- /dev/null
+++ b/quic/qlog.go
@@ -0,0 +1,274 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package quic
+
+import (
+ "context"
+ "encoding/hex"
+ "log/slog"
+ "net/netip"
+ "time"
+)
+
+// Log levels for qlog events.
+const (
+ // QLogLevelFrame includes per-frame information.
+ // When this level is enabled, packet_sent and packet_received events will
+ // contain information on individual frames sent/received.
+ QLogLevelFrame = slog.Level(-6)
+
+ // QLogLevelPacket events occur at most once per packet sent or received.
+ //
+ // For example: packet_sent, packet_received.
+ QLogLevelPacket = slog.Level(-4)
+
+ // QLogLevelConn events occur multiple times over a connection's lifetime,
+ // but less often than the frequency of individual packets.
+ //
+ // For example: connection_state_updated.
+ QLogLevelConn = slog.Level(-2)
+
+ // QLogLevelEndpoint events occur at most once per connection.
+ //
+ // For example: connection_started, connection_closed.
+ QLogLevelEndpoint = slog.Level(0)
+)
+
+func (c *Conn) logEnabled(level slog.Level) bool {
+ return logEnabled(c.log, level)
+}
+
+func logEnabled(log *slog.Logger, level slog.Level) bool {
+ return log != nil && log.Enabled(context.Background(), level)
+}
+
+// slogHexstring returns a slog.Attr for a value of the hexstring type.
+//
+// https://www.ietf.org/archive/id/draft-ietf-quic-qlog-main-schema-04.html#section-1.1.1
+func slogHexstring(key string, value []byte) slog.Attr {
+ return slog.String(key, hex.EncodeToString(value))
+}
+
+func slogAddr(key string, value netip.Addr) slog.Attr {
+ return slog.String(key, value.String())
+}
+
+func (c *Conn) logConnectionStarted(originalDstConnID []byte, peerAddr netip.AddrPort) {
+ if c.config.QLogLogger == nil ||
+ !c.config.QLogLogger.Enabled(context.Background(), QLogLevelEndpoint) {
+ return
+ }
+ var vantage string
+ if c.side == clientSide {
+ vantage = "client"
+ originalDstConnID = c.connIDState.originalDstConnID
+ } else {
+ vantage = "server"
+ }
+ // A qlog Trace container includes some metadata (title, description, vantage_point)
+ // and a list of Events. The Trace also includes a common_fields field setting field
+ // values common to all events in the trace.
+ //
+ // Trace = {
+ // ? title: text
+ // ? description: text
+ // ? configuration: Configuration
+ // ? common_fields: CommonFields
+ // ? vantage_point: VantagePoint
+ // events: [* Event]
+ // }
+ //
+ // To map this into slog's data model, we start each per-connection trace with a With
+ // call that includes both the trace metadata and the common fields.
+ //
+ // This means that in slog's model, each trace event will also include
+ // the Trace metadata fields (vantage_point), which is a divergence from the qlog model.
+ c.log = c.config.QLogLogger.With(
+ // The group_id permits associating traces taken from different vantage points
+ // for the same connection.
+ //
+ // We use the original destination connection ID as the group ID.
+ //
+ // https://www.ietf.org/archive/id/draft-ietf-quic-qlog-main-schema-04.html#section-3.4.6
+ slogHexstring("group_id", originalDstConnID),
+ slog.Group("vantage_point",
+ slog.String("name", "go quic"),
+ slog.String("type", vantage),
+ ),
+ )
+ localAddr := c.endpoint.LocalAddr()
+ // https://www.ietf.org/archive/id/draft-ietf-quic-qlog-quic-events-03.html#section-4.2
+ c.log.LogAttrs(context.Background(), QLogLevelEndpoint,
+ "connectivity:connection_started",
+ slogAddr("src_ip", localAddr.Addr()),
+ slog.Int("src_port", int(localAddr.Port())),
+ slogHexstring("src_cid", c.connIDState.local[0].cid),
+ slogAddr("dst_ip", peerAddr.Addr()),
+ slog.Int("dst_port", int(peerAddr.Port())),
+ slogHexstring("dst_cid", c.connIDState.remote[0].cid),
+ )
+}
+
+func (c *Conn) logConnectionClosed() {
+ if !c.logEnabled(QLogLevelEndpoint) {
+ return
+ }
+ err := c.lifetime.finalErr
+ trigger := "error"
+ switch e := err.(type) {
+ case *ApplicationError:
+ // TODO: Distinguish between peer and locally-initiated close.
+ trigger = "application"
+ case localTransportError:
+ switch err {
+ case errHandshakeTimeout:
+ trigger = "handshake_timeout"
+ default:
+ if e.code == errNo {
+ trigger = "clean"
+ }
+ }
+ case peerTransportError:
+ if e.code == errNo {
+ trigger = "clean"
+ }
+ default:
+ switch err {
+ case errIdleTimeout:
+ trigger = "idle_timeout"
+ case errStatelessReset:
+ trigger = "stateless_reset"
+ }
+ }
+ // https://www.ietf.org/archive/id/draft-ietf-quic-qlog-quic-events-03.html#section-4.3
+ c.log.LogAttrs(context.Background(), QLogLevelEndpoint,
+ "connectivity:connection_closed",
+ slog.String("trigger", trigger),
+ )
+}
+
+func (c *Conn) logPacketDropped(dgram *datagram) {
+ c.log.LogAttrs(context.Background(), QLogLevelPacket,
+ "connectivity:packet_dropped",
+ )
+}
+
+func (c *Conn) logLongPacketReceived(p longPacket, pkt []byte) {
+ var frames slog.Attr
+ if c.logEnabled(QLogLevelFrame) {
+ frames = c.packetFramesAttr(p.payload)
+ }
+ c.log.LogAttrs(context.Background(), QLogLevelPacket,
+ "transport:packet_received",
+ slog.Group("header",
+ slog.String("packet_type", p.ptype.qlogString()),
+ slog.Uint64("packet_number", uint64(p.num)),
+ slog.Uint64("flags", uint64(pkt[0])),
+ slogHexstring("scid", p.srcConnID),
+ slogHexstring("dcid", p.dstConnID),
+ ),
+ slog.Group("raw",
+ slog.Int("length", len(pkt)),
+ ),
+ frames,
+ )
+}
+
+func (c *Conn) log1RTTPacketReceived(p shortPacket, pkt []byte) {
+ var frames slog.Attr
+ if c.logEnabled(QLogLevelFrame) {
+ frames = c.packetFramesAttr(p.payload)
+ }
+ dstConnID, _ := dstConnIDForDatagram(pkt)
+ c.log.LogAttrs(context.Background(), QLogLevelPacket,
+ "transport:packet_received",
+ slog.Group("header",
+ slog.String("packet_type", packetType1RTT.qlogString()),
+ slog.Uint64("packet_number", uint64(p.num)),
+ slog.Uint64("flags", uint64(pkt[0])),
+ slogHexstring("dcid", dstConnID),
+ ),
+ slog.Group("raw",
+ slog.Int("length", len(pkt)),
+ ),
+ frames,
+ )
+}
+
+func (c *Conn) logPacketSent(ptype packetType, pnum packetNumber, src, dst []byte, pktLen int, payload []byte) {
+ var frames slog.Attr
+ if c.logEnabled(QLogLevelFrame) {
+ frames = c.packetFramesAttr(payload)
+ }
+ var scid slog.Attr
+ if len(src) > 0 {
+ scid = slogHexstring("scid", src)
+ }
+ c.log.LogAttrs(context.Background(), QLogLevelPacket,
+ "transport:packet_sent",
+ slog.Group("header",
+ slog.String("packet_type", ptype.qlogString()),
+ slog.Uint64("packet_number", uint64(pnum)),
+ scid,
+ slogHexstring("dcid", dst),
+ ),
+ slog.Group("raw",
+ slog.Int("length", pktLen),
+ ),
+ frames,
+ )
+}
+
+// packetFramesAttr returns the "frames" attribute containing the frames in a packet.
+// We currently pass this as a slog Any containing a []slog.Value,
+// where each Value is a debugFrame that implements slog.LogValuer.
+//
+// This isn't tremendously efficient, but avoids the need to put a JSON encoder
+// in the quic package or a frame parser in the qlog package.
+func (c *Conn) packetFramesAttr(payload []byte) slog.Attr {
+ var frames []slog.Value
+ for len(payload) > 0 {
+ f, n := parseDebugFrame(payload)
+ if n < 0 {
+ break
+ }
+ payload = payload[n:]
+ switch f := f.(type) {
+ case debugFrameAck:
+ // The qlog ACK frame contains the ACK Delay field as a duration.
+ // Interpreting the contents of this field as a duration requires
+ // knowing the peer's ack_delay_exponent transport parameter,
+ // and it's possible for us to parse an ACK frame before we've
+ // received that parameter.
+ //
+ // We could plumb connection state down into the frame parser,
+ // but for now let's minimize the amount of code that needs to
+ // deal with this and convert the unscaled value into a scaled one here.
+ ackDelay := time.Duration(-1)
+ if c.peerAckDelayExponent >= 0 {
+ ackDelay = f.ackDelay.Duration(uint8(c.peerAckDelayExponent))
+ }
+ frames = append(frames, slog.AnyValue(debugFrameScaledAck{
+ ranges: f.ranges,
+ ackDelay: ackDelay,
+ }))
+ default:
+ frames = append(frames, slog.AnyValue(f))
+ }
+ }
+ return slog.Any("frames", frames)
+}
+
+func (c *Conn) logPacketLost(space numberSpace, sent *sentPacket) {
+ c.log.LogAttrs(context.Background(), QLogLevelPacket,
+ "recovery:packet_lost",
+ slog.Group("header",
+ slog.String("packet_type", sent.ptype.qlogString()),
+ slog.Uint64("packet_number", uint64(sent.num)),
+ ),
+ )
+}
diff --git a/quic/qlog/handler.go b/quic/qlog/handler.go
new file mode 100644
index 000000000..35a66cf8b
--- /dev/null
+++ b/quic/qlog/handler.go
@@ -0,0 +1,76 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package qlog
+
+import (
+ "context"
+ "log/slog"
+)
+
+type withAttrsHandler struct {
+ attrs []slog.Attr
+ h slog.Handler
+}
+
+func withAttrs(h slog.Handler, attrs []slog.Attr) slog.Handler {
+ if len(attrs) == 0 {
+ return h
+ }
+ return &withAttrsHandler{attrs: attrs, h: h}
+}
+
+func (h *withAttrsHandler) Enabled(ctx context.Context, level slog.Level) bool {
+ return h.h.Enabled(ctx, level)
+}
+
+func (h *withAttrsHandler) Handle(ctx context.Context, r slog.Record) error {
+ r.AddAttrs(h.attrs...)
+ return h.h.Handle(ctx, r)
+}
+
+func (h *withAttrsHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ return withAttrs(h, attrs)
+}
+
+func (h *withAttrsHandler) WithGroup(name string) slog.Handler {
+ return withGroup(h, name)
+}
+
+type withGroupHandler struct {
+ name string
+ h slog.Handler
+}
+
+func withGroup(h slog.Handler, name string) slog.Handler {
+ if name == "" {
+ return h
+ }
+ return &withGroupHandler{name: name, h: h}
+}
+
+func (h *withGroupHandler) Enabled(ctx context.Context, level slog.Level) bool {
+ return h.h.Enabled(ctx, level)
+}
+
+func (h *withGroupHandler) Handle(ctx context.Context, r slog.Record) error {
+ var attrs []slog.Attr
+ r.Attrs(func(a slog.Attr) bool {
+ attrs = append(attrs, a)
+ return true
+ })
+ nr := slog.NewRecord(r.Time, r.Level, r.Message, r.PC)
+ nr.Add(slog.Any(h.name, slog.GroupValue(attrs...)))
+ return h.h.Handle(ctx, nr)
+}
+
+func (h *withGroupHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ return withAttrs(h, attrs)
+}
+
+func (h *withGroupHandler) WithGroup(name string) slog.Handler {
+ return withGroup(h, name)
+}
diff --git a/quic/qlog/json_writer.go b/quic/qlog/json_writer.go
new file mode 100644
index 000000000..6fb8d33b2
--- /dev/null
+++ b/quic/qlog/json_writer.go
@@ -0,0 +1,261 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package qlog
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log/slog"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// A jsonWriter writes JSON-SEQ (RFC 7464).
+//
+// A JSON-SEQ file consists of a series of JSON text records,
+// each beginning with an RS (0x1e) character and ending with LF (0x0a).
+type jsonWriter struct {
+ mu sync.Mutex
+ w io.WriteCloser
+ buf bytes.Buffer
+}
+
+// writeRecordStart writes the start of a JSON-SEQ record.
+func (w *jsonWriter) writeRecordStart() {
+ w.mu.Lock()
+ w.buf.WriteByte(0x1e)
+ w.buf.WriteByte('{')
+}
+
+// writeRecordEnd finishes writing a JSON-SEQ record.
+func (w *jsonWriter) writeRecordEnd() {
+ w.buf.WriteByte('}')
+ w.buf.WriteByte('\n')
+ w.w.Write(w.buf.Bytes())
+ w.buf.Reset()
+ w.mu.Unlock()
+}
+
+func (w *jsonWriter) writeAttrs(attrs []slog.Attr) {
+ w.buf.WriteByte('{')
+ for _, a := range attrs {
+ w.writeAttr(a)
+ }
+ w.buf.WriteByte('}')
+}
+
+func (w *jsonWriter) writeAttr(a slog.Attr) {
+ if a.Key == "" {
+ return
+ }
+ w.writeName(a.Key)
+ w.writeValue(a.Value)
+}
+
+// writeAttrsField writes a []slog.Attr as an object field.
+func (w *jsonWriter) writeAttrsField(name string, attrs []slog.Attr) {
+ w.writeName(name)
+ w.writeAttrs(attrs)
+}
+
+func (w *jsonWriter) writeValue(v slog.Value) {
+ v = v.Resolve()
+ switch v.Kind() {
+ case slog.KindAny:
+ switch v := v.Any().(type) {
+ case []slog.Value:
+ w.writeArray(v)
+ case interface{ AppendJSON([]byte) []byte }:
+ w.buf.Write(v.AppendJSON(w.buf.AvailableBuffer()))
+ default:
+ w.writeString(fmt.Sprint(v))
+ }
+ case slog.KindBool:
+ w.writeBool(v.Bool())
+ case slog.KindDuration:
+ w.writeDuration(v.Duration())
+ case slog.KindFloat64:
+ w.writeFloat64(v.Float64())
+ case slog.KindInt64:
+ w.writeInt64(v.Int64())
+ case slog.KindString:
+ w.writeString(v.String())
+ case slog.KindTime:
+ w.writeTime(v.Time())
+ case slog.KindUint64:
+ w.writeUint64(v.Uint64())
+ case slog.KindGroup:
+ w.writeAttrs(v.Group())
+ default:
+ w.writeString("unhandled kind")
+ }
+}
+
+// writeName writes an object field name followed by a colon.
+func (w *jsonWriter) writeName(name string) {
+ if b := w.buf.Bytes(); len(b) > 0 && b[len(b)-1] != '{' {
+ // Add the comma separating this from the previous field.
+ w.buf.WriteByte(',')
+ }
+ w.writeString(name)
+ w.buf.WriteByte(':')
+}
+
+func (w *jsonWriter) writeObject(f func()) {
+ w.buf.WriteByte('{')
+ f()
+ w.buf.WriteByte('}')
+}
+
+// writeObjectField writes an object-valued object field.
+// The function f is called to write the contents.
+func (w *jsonWriter) writeObjectField(name string, f func()) {
+ w.writeName(name)
+ w.writeObject(f)
+}
+
+func (w *jsonWriter) writeArray(vals []slog.Value) {
+ w.buf.WriteByte('[')
+ for i, v := range vals {
+ if i != 0 {
+ w.buf.WriteByte(',')
+ }
+ w.writeValue(v)
+ }
+ w.buf.WriteByte(']')
+}
+
+func (w *jsonWriter) writeRaw(v string) {
+ w.buf.WriteString(v)
+}
+
+// writeRawField writes a field with a raw JSON value.
+func (w *jsonWriter) writeRawField(name, v string) {
+ w.writeName(name)
+ w.writeRaw(v)
+}
+
+func (w *jsonWriter) writeBool(v bool) {
+ if v {
+ w.buf.WriteString("true")
+ } else {
+ w.buf.WriteString("false")
+ }
+}
+
+// writeBoolField writes a bool-valued object field.
+func (w *jsonWriter) writeBoolField(name string, v bool) {
+ w.writeName(name)
+ w.writeBool(v)
+}
+
+// writeDuration writes a duration as milliseconds.
+func (w *jsonWriter) writeDuration(v time.Duration) {
+ if v < 0 {
+ w.buf.WriteByte('-')
+ v = -v
+ }
+ fmt.Fprintf(&w.buf, "%d.%06d", v.Milliseconds(), v%time.Millisecond)
+}
+
+// writeDurationField writes a millisecond duration-valued object field.
+func (w *jsonWriter) writeDurationField(name string, v time.Duration) {
+ w.writeName(name)
+ w.writeDuration(v)
+}
+
+func (w *jsonWriter) writeFloat64(v float64) {
+ w.buf.Write(strconv.AppendFloat(w.buf.AvailableBuffer(), v, 'f', -1, 64))
+}
+
+// writeFloat64Field writes a float64-valued object field.
+func (w *jsonWriter) writeFloat64Field(name string, v float64) {
+ w.writeName(name)
+ w.writeFloat64(v)
+}
+
+func (w *jsonWriter) writeInt64(v int64) {
+ w.buf.Write(strconv.AppendInt(w.buf.AvailableBuffer(), v, 10))
+}
+
+// writeInt64Field writes an int64-valued object field.
+func (w *jsonWriter) writeInt64Field(name string, v int64) {
+ w.writeName(name)
+ w.writeInt64(v)
+}
+
+func (w *jsonWriter) writeUint64(v uint64) {
+ w.buf.Write(strconv.AppendUint(w.buf.AvailableBuffer(), v, 10))
+}
+
+// writeUint64Field writes a uint64-valued object field.
+func (w *jsonWriter) writeUint64Field(name string, v uint64) {
+ w.writeName(name)
+ w.writeUint64(v)
+}
+
+// writeTime writes a time as seconds since the Unix epoch.
+func (w *jsonWriter) writeTime(v time.Time) {
+ fmt.Fprintf(&w.buf, "%d.%06d", v.UnixMilli(), v.Nanosecond()%int(time.Millisecond))
+}
+
+// writeTimeField writes a time-valued object field.
+func (w *jsonWriter) writeTimeField(name string, v time.Time) {
+ w.writeName(name)
+ w.writeTime(v)
+}
+
+func jsonSafeSet(c byte) bool {
+ // mask is a 128-bit bitmap with 1s for allowed bytes,
+ // so that the byte c can be tested with a shift and an and.
+ // If c > 128, then 1<`,
12: `. .
`,
@@ -169,3 +171,37 @@ func TestRenderer(t *testing.T) {
t.Errorf("got vs want:\n%s\n%s\n", got, want)
}
}
+
+func TestRenderTextNodes(t *testing.T) {
+ elements := []string{"style", "script", "xmp", "iframe", "noembed", "noframes", "plaintext", "noscript"}
+ for _, namespace := range []string{
+ "", // html
+ "svg",
+ "math",
+ } {
+ for _, e := range elements {
+ var namespaceOpen, namespaceClose string
+ if namespace != "" {
+ namespaceOpen, namespaceClose = fmt.Sprintf("<%s>", namespace), fmt.Sprintf("</%s>", namespace)
+ }
+ doc := fmt.Sprintf(`%s<%s>&amp;</%s>%s`, namespaceOpen, e, e, namespaceClose)
+ n, err := Parse(strings.NewReader(doc))
+ if err != nil {
+ t.Fatal(err)
+ }
+ b := bytes.NewBuffer(nil)
+ if err := Render(b, n); err != nil {
+ t.Fatal(err)
+ }
+
+ expected := doc
+ if namespace != "" {
+ expected = strings.Replace(expected, "&amp;", "&", 1)
+ }
+
+ if b.String() != expected {
+ t.Errorf("unexpected output: got %q, want %q", b.String(), expected)
+ }
+ }
+ }
+}
diff --git a/html/token.go b/html/token.go
index de67f938a..3c57880d6 100644
--- a/html/token.go
+++ b/html/token.go
@@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() {
return
}
switch c {
- case ' ', '\n', '\r', '\t', '\f', '/':
- z.pendingAttr[0].end = z.raw.end - 1
- return
case '=':
if z.pendingAttr[0].start+1 == z.raw.end {
// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
@@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() {
continue
}
fallthrough
- case '>':
+ case ' ', '\n', '\r', '\t', '\f', '/', '>':
+ // WHATWG 13.2.5.33 Attribute name state
+ // We need to reconsume the char in the after attribute name state to support the / character
z.raw.end--
z.pendingAttr[0].end = z.raw.end
return
@@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() {
if z.err != nil {
return
}
+ if c == '/' {
+ // WHATWG 13.2.5.34 After attribute name state
+ // U+002F SOLIDUS (/) - Switch to the self-closing start tag state.
+ return
+ }
if c != '=' {
z.raw.end--
return
diff --git a/html/token_test.go b/html/token_test.go
index b2383a951..8b0d5aab6 100644
--- a/html/token_test.go
+++ b/html/token_test.go
@@ -601,6 +601,21 @@ var tokenTests = []tokenTest{
`