diff --git a/pkg/http2/LICENSE b/pkg/http2/LICENSE index 6a66aea..2a7cf70 100644 --- a/pkg/http2/LICENSE +++ b/pkg/http2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/pkg/http2/client_conn_pool.go b/pkg/http2/client_conn_pool.go index 780968d..e81b73e 100644 --- a/pkg/http2/client_conn_pool.go +++ b/pkg/http2/client_conn_pool.go @@ -8,8 +8,8 @@ package http2 import ( "context" - "crypto/tls" "errors" + "net" "net/http" "sync" ) @@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) { // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. -func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { @@ -194,8 +194,8 @@ type addConnCall struct { err error } -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) +func (c *addConnCall) run(t *Transport, key string, nc net.Conn) { + cc, err := t.NewClientConn(nc) p := c.p p.mu.Lock() diff --git a/pkg/http2/clientconn_test.go b/pkg/http2/clientconn_test.go new file mode 100644 index 0000000..42d9fd2 --- /dev/null +++ b/pkg/http2/clientconn_test.go @@ -0,0 +1,594 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Infrastructure for testing ClientConn.RoundTrip. +// Put actual tests in transport_test.go. + +package http2 + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "reflect" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2/hpack" +) + +// TestTestClientConn demonstrates usage of testClientConn. +func TestTestClientConn(t *testing.T) { + // newTestClientConn creates a *ClientConn and surrounding test infrastructure. + tc := newTestClientConn(t) + + // tc.greet reads the client's initial SETTINGS and WINDOW_UPDATE frames, + // and sends a SETTINGS frame to the client. + // + // Additional settings may be provided as optional parameters to greet. + tc.greet() + + // Request bodies must either be constant (bytes.Buffer, strings.Reader) + // or created with newRequestBody. + body := tc.newRequestBody() + body.writeBytes(10) // 10 arbitrary bytes... + body.closeWithError(io.EOF) // ...followed by EOF. + + // tc.roundTrip calls RoundTrip, but does not wait for it to return. + // It returns a testRoundTrip. + req, _ := http.NewRequest("PUT", "https://dummy.tld/", body) + rt := tc.roundTrip(req) + + // tc has a number of methods to check for expected frames sent. + // Here, we look for headers and the request body. 
+ tc.wantHeaders(wantHeader{ + streamID: rt.streamID(), + endStream: false, + header: http.Header{ + ":authority": []string{"dummy.tld"}, + ":method": []string{"PUT"}, + ":path": []string{"/"}, + }, + }) + // Expect 10 bytes of request body in DATA frames. + tc.wantData(wantData{ + streamID: rt.streamID(), + endStream: true, + size: 10, + multiple: true, + }) + + // tc.writeHeaders sends a HEADERS frame back to the client. + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + + // Now that we've received headers, RoundTrip has finished. + // testRoundTrip has various methods to examine the response, + // or to fetch the response and/or error returned by RoundTrip + rt.wantStatus(200) + rt.wantBody(nil) +} + +// A testClientConn allows testing ClientConn.RoundTrip against a fake server. +// +// A test using testClientConn consists of: +// - actions on the client (calling RoundTrip, making data available to Request.Body); +// - validation of frames sent by the client to the server; and +// - providing frames from the server to the client. +// +// testClientConn manages synchronization, so tests can generally be written as +// a linear sequence of actions and validations without additional synchronization. +type testClientConn struct { + t *testing.T + + tr *Transport + fr *Framer + cc *ClientConn + group *synctestGroup + testConnFramer + + encbuf bytes.Buffer + enc *hpack.Encoder + + roundtrips []*testRoundTrip + + netconn *synctestNetConn +} + +func newTestClientConnFromClientConn(t *testing.T, cc *ClientConn) *testClientConn { + tc := &testClientConn{ + t: t, + tr: cc.t, + cc: cc, + group: cc.t.transportTestHooks.group.(*synctestGroup), + } + + // srv is the side controlled by the test. + var srv *synctestNetConn + if cc.tconn == nil { + // If cc.tconn is nil, we're being called with a new conn created by the + // Transport's client pool. This path skips dialing the server, and we + // create a test connection pair here. + cc.tconn, srv = synctestNetPipe(tc.group) + } else { + // If cc.tconn is non-nil, we're in a test which provides a conn to the + // Transport via a TLSNextProto hook. Extract the test connection pair. + if tc, ok := cc.tconn.(*tls.Conn); ok { + // Unwrap any *tls.Conn to the underlying net.Conn, + // to avoid dealing with encryption in tests. + cc.tconn = tc.NetConn() + } + srv = cc.tconn.(*synctestNetConn).peer + } + + srv.SetReadDeadline(tc.group.Now()) + srv.autoWait = true + tc.netconn = srv + tc.enc = hpack.NewEncoder(&tc.encbuf) + tc.fr = NewFramer(srv, srv) + tc.testConnFramer = testConnFramer{ + t: t, + fr: tc.fr, + dec: hpack.NewDecoder(initialHeaderTableSize, nil), + } + tc.fr.SetMaxReadFrameSize(10 << 20) + t.Cleanup(func() { + tc.closeWrite() + }) + + return tc +} + +func (tc *testClientConn) readClientPreface() { + tc.t.Helper() + // Read the client's HTTP/2 preface, sent prior to any HTTP/2 frames. + buf := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(tc.netconn, buf); err != nil { + tc.t.Fatalf("reading preface: %v", err) + } + if !bytes.Equal(buf, clientPreface) { + tc.t.Fatalf("client preface: %q, want %q", buf, clientPreface) + } +} + +func newTestClientConn(t *testing.T, opts ...any) *testClientConn { + t.Helper() + + tt := newTestTransport(t, opts...) 
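+	// opts are interpreted by newTestTransport: each option may be a
+	// func(*http.Transport), a func(*Transport), or a *Transport to use
+	// directly. For example:
+	//
+	//	tc := newTestClientConn(t, func(tr *Transport) {
+	//		tr.ReadIdleTimeout = 2 * time.Second
+	//	})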
+ const singleUse = false + _, err := tt.tr.newClientConn(nil, singleUse) + if err != nil { + t.Fatalf("newClientConn: %v", err) + } + + return tt.getConn() +} + +// sync waits for the ClientConn under test to reach a stable state, +// with all goroutines blocked on some input. +func (tc *testClientConn) sync() { + tc.group.Wait() +} + +// advance advances synthetic time by a duration. +func (tc *testClientConn) advance(d time.Duration) { + tc.group.AdvanceTime(d) + tc.sync() +} + +// hasFrame reports whether a frame is available to be read. +func (tc *testClientConn) hasFrame() bool { + return len(tc.netconn.Peek()) > 0 +} + +// isClosed reports whether the peer has closed the connection. +func (tc *testClientConn) isClosed() bool { + return tc.netconn.IsClosedByPeer() +} + +// closeWrite causes the net.Conn used by the ClientConn to return a error +// from Read calls. +func (tc *testClientConn) closeWrite() { + tc.netconn.Close() +} + +// testRequestBody is a Request.Body for use in tests. +type testRequestBody struct { + tc *testClientConn + gate gate + + // At most one of buf or bytes can be set at any given time: + buf bytes.Buffer // specific bytes to read from the body + bytes int // body contains this many arbitrary bytes + + err error // read error (comes after any available bytes) +} + +func (tc *testClientConn) newRequestBody() *testRequestBody { + b := &testRequestBody{ + tc: tc, + gate: newGate(), + } + return b +} + +func (b *testRequestBody) unlock() { + b.gate.unlock(b.buf.Len() > 0 || b.bytes > 0 || b.err != nil) +} + +// Read is called by the ClientConn to read from a request body. +func (b *testRequestBody) Read(p []byte) (n int, _ error) { + if err := b.gate.waitAndLock(context.Background()); err != nil { + return 0, err + } + defer b.unlock() + switch { + case b.buf.Len() > 0: + return b.buf.Read(p) + case b.bytes > 0: + if len(p) > b.bytes { + p = p[:b.bytes] + } + b.bytes -= len(p) + for i := range p { + p[i] = 'A' + } + return len(p), nil + default: + return 0, b.err + } +} + +// Close is called by the ClientConn when it is done reading from a request body. +func (b *testRequestBody) Close() error { + return nil +} + +// writeBytes adds n arbitrary bytes to the body. +func (b *testRequestBody) writeBytes(n int) { + defer b.tc.sync() + b.gate.lock() + defer b.unlock() + b.bytes += n + b.checkWrite() + b.tc.sync() +} + +// Write adds bytes to the body. +func (b *testRequestBody) Write(p []byte) (int, error) { + defer b.tc.sync() + b.gate.lock() + defer b.unlock() + n, err := b.buf.Write(p) + b.checkWrite() + return n, err +} + +func (b *testRequestBody) checkWrite() { + if b.bytes > 0 && b.buf.Len() > 0 { + b.tc.t.Fatalf("can't interleave Write and writeBytes on request body") + } + if b.err != nil { + b.tc.t.Fatalf("can't write to request body after closeWithError") + } +} + +// closeWithError sets an error which will be returned by Read. +func (b *testRequestBody) closeWithError(err error) { + defer b.tc.sync() + b.gate.lock() + defer b.unlock() + b.err = err +} + +// roundTrip starts a RoundTrip call. +// +// (Note that the RoundTrip won't complete until response headers are received, +// the request times out, or some other terminal condition is reached.) 
+func (tc *testClientConn) roundTrip(req *http.Request) *testRoundTrip { + rt := &testRoundTrip{ + t: tc.t, + donec: make(chan struct{}), + } + tc.roundtrips = append(tc.roundtrips, rt) + go func() { + tc.group.Join() + defer close(rt.donec) + rt.resp, rt.respErr = tc.cc.roundTrip(req, func(cs *clientStream) { + rt.id.Store(cs.ID) + }) + }() + tc.sync() + + tc.t.Cleanup(func() { + if !rt.done() { + return + } + res, _ := rt.result() + if res != nil { + res.Body.Close() + } + }) + + return rt +} + +func (tc *testClientConn) greet(settings ...Setting) { + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.writeSettings(settings...) + tc.writeSettingsAck() + tc.wantFrameType(FrameSettings) // acknowledgement +} + +// makeHeaderBlockFragment encodes headers in a form suitable for inclusion +// in a HEADERS or CONTINUATION frame. +// +// It takes a list of alernating names and values. +func (tc *testClientConn) makeHeaderBlockFragment(s ...string) []byte { + if len(s)%2 != 0 { + tc.t.Fatalf("uneven list of header name/value pairs") + } + tc.encbuf.Reset() + for i := 0; i < len(s); i += 2 { + tc.enc.WriteField(hpack.HeaderField{Name: s[i], Value: s[i+1]}) + } + return tc.encbuf.Bytes() +} + +// inflowWindow returns the amount of inbound flow control available for a stream, +// or for the connection if streamID is 0. +func (tc *testClientConn) inflowWindow(streamID uint32) int32 { + tc.cc.mu.Lock() + defer tc.cc.mu.Unlock() + if streamID == 0 { + return tc.cc.inflow.avail + tc.cc.inflow.unsent + } + cs := tc.cc.streams[streamID] + if cs == nil { + tc.t.Errorf("no stream with id %v", streamID) + return -1 + } + return cs.inflow.avail + cs.inflow.unsent +} + +// testRoundTrip manages a RoundTrip in progress. +type testRoundTrip struct { + t *testing.T + resp *http.Response + respErr error + donec chan struct{} + id atomic.Uint32 +} + +// streamID returns the HTTP/2 stream ID of the request. +func (rt *testRoundTrip) streamID() uint32 { + id := rt.id.Load() + if id == 0 { + panic("stream ID unknown") + } + return id +} + +// done reports whether RoundTrip has returned. +func (rt *testRoundTrip) done() bool { + select { + case <-rt.donec: + return true + default: + return false + } +} + +// result returns the result of the RoundTrip. +func (rt *testRoundTrip) result() (*http.Response, error) { + t := rt.t + t.Helper() + select { + case <-rt.donec: + default: + t.Fatalf("RoundTrip is not done; want it to be") + } + return rt.resp, rt.respErr +} + +// response returns the response of a successful RoundTrip. +// If the RoundTrip unexpectedly failed, it calls t.Fatal. +func (rt *testRoundTrip) response() *http.Response { + t := rt.t + t.Helper() + resp, err := rt.result() + if err != nil { + t.Fatalf("RoundTrip returned unexpected error: %v", rt.respErr) + } + if resp == nil { + t.Fatalf("RoundTrip returned nil *Response and nil error") + } + return resp +} + +// err returns the (possibly nil) error result of RoundTrip. +func (rt *testRoundTrip) err() error { + t := rt.t + t.Helper() + _, err := rt.result() + return err +} + +// wantStatus indicates the expected response StatusCode. +func (rt *testRoundTrip) wantStatus(want int) { + t := rt.t + t.Helper() + if got := rt.response().StatusCode; got != want { + t.Fatalf("got response status %v, want %v", got, want) + } +} + +// body reads the contents of the response body. 
+func (rt *testRoundTrip) readBody() ([]byte, error) { + t := rt.t + t.Helper() + return io.ReadAll(rt.response().Body) +} + +// wantBody indicates the expected response body. +// (Note that this consumes the body.) +func (rt *testRoundTrip) wantBody(want []byte) { + t := rt.t + t.Helper() + got, err := rt.readBody() + if err != nil { + t.Fatalf("unexpected error reading response body: %v", err) + } + if !bytes.Equal(got, want) { + t.Fatalf("unexpected response body:\ngot: %q\nwant: %q", got, want) + } +} + +// wantHeaders indicates the expected response headers. +func (rt *testRoundTrip) wantHeaders(want http.Header) { + t := rt.t + t.Helper() + res := rt.response() + if diff := diffHeaders(res.Header, want); diff != "" { + t.Fatalf("unexpected response headers:\n%v", diff) + } +} + +// wantTrailers indicates the expected response trailers. +func (rt *testRoundTrip) wantTrailers(want http.Header) { + t := rt.t + t.Helper() + res := rt.response() + if diff := diffHeaders(res.Trailer, want); diff != "" { + t.Fatalf("unexpected response trailers:\n%v", diff) + } +} + +func diffHeaders(got, want http.Header) string { + // nil and 0-length non-nil are equal. + if len(got) == 0 && len(want) == 0 { + return "" + } + // We could do a more sophisticated diff here. + // DeepEqual is good enough for now. + if reflect.DeepEqual(got, want) { + return "" + } + return fmt.Sprintf("got: %v\nwant: %v", got, want) +} + +// A testTransport allows testing Transport.RoundTrip against fake servers. +// Tests that aren't specifically exercising RoundTrip's retry loop or connection pooling +// should use testClientConn instead. +type testTransport struct { + t *testing.T + tr *Transport + group *synctestGroup + + ccs []*testClientConn +} + +func newTestTransport(t *testing.T, opts ...any) *testTransport { + tt := &testTransport{ + t: t, + group: newSynctest(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)), + } + tt.group.Join() + + tr := &Transport{} + for _, o := range opts { + switch o := o.(type) { + case func(*http.Transport): + if tr.t1 == nil { + tr.t1 = &http.Transport{} + } + o(tr.t1) + case func(*Transport): + o(tr) + case *Transport: + tr = o + } + } + tt.tr = tr + + tr.transportTestHooks = &transportTestHooks{ + group: tt.group, + newclientconn: func(cc *ClientConn) { + tc := newTestClientConnFromClientConn(t, cc) + tt.ccs = append(tt.ccs, tc) + }, + } + + t.Cleanup(func() { + tt.sync() + if len(tt.ccs) > 0 { + t.Fatalf("%v test ClientConns created, but not examined by test", len(tt.ccs)) + } + tt.group.Close(t) + }) + + return tt +} + +func (tt *testTransport) sync() { + tt.group.Wait() +} + +func (tt *testTransport) advance(d time.Duration) { + tt.group.AdvanceTime(d) + tt.sync() +} + +func (tt *testTransport) hasConn() bool { + return len(tt.ccs) > 0 +} + +func (tt *testTransport) getConn() *testClientConn { + tt.t.Helper() + if len(tt.ccs) == 0 { + tt.t.Fatalf("no new ClientConns created; wanted one") + } + tc := tt.ccs[0] + tt.ccs = tt.ccs[1:] + tc.sync() + tc.readClientPreface() + tc.sync() + return tc +} + +func (tt *testTransport) roundTrip(req *http.Request) *testRoundTrip { + rt := &testRoundTrip{ + t: tt.t, + donec: make(chan struct{}), + } + go func() { + tt.group.Join() + defer close(rt.donec) + rt.resp, rt.respErr = tt.tr.RoundTrip(req) + }() + tt.sync() + + tt.t.Cleanup(func() { + if !rt.done() { + return + } + res, _ := rt.result() + if res != nil { + res.Body.Close() + } + }) + + return rt +} diff --git a/pkg/http2/config.go b/pkg/http2/config.go new file mode 100644 index 
0000000..de58dfb --- /dev/null +++ b/pkg/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. +type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. 
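+	// For example, a MaxReadFrameSize of 1<<10 is raised to minMaxFrameSize
+	// (16384) here rather than being replaced by defaultMaxReadFrameSize.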
+ if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/pkg/http2/config_go124.go b/pkg/http2/config_go124.go new file mode 100644 index 0000000..e378412 --- /dev/null +++ b/pkg/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. 
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/pkg/http2/config_pre_go124.go b/pkg/http2/config_pre_go124.go new file mode 100644 index 0000000..060fd6c --- /dev/null +++ b/pkg/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/pkg/http2/config_test.go b/pkg/http2/config_test.go new file mode 100644 index 0000000..b8e7a7b --- /dev/null +++ b/pkg/http2/config_test.go @@ -0,0 +1,95 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
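+
+// A rough illustration of the precedence implemented in config.go: a
+// non-zero net/http.HTTP2Config field overrides the corresponding
+// http2.Server field, and setConfigDefaults only fills in values that
+// remain unset or out of range. For instance:
+//
+//	h2 := &Server{MaxReadFrameSize: 256 << 10}
+//	h1 := &http.Server{HTTP2: &http.HTTP2Config{MaxReadFrameSize: 512 << 10}}
+//	conf := configFromServer(h1, h2)
+//	// conf.MaxReadFrameSize == 512<<10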
+ +//go:build go1.24 + +package http2 + +import ( + "net/http" + "testing" + "time" +) + +func TestConfigServerSettings(t *testing.T) { + config := &http.HTTP2Config{ + MaxConcurrentStreams: 1, + MaxDecoderHeaderTableSize: 1<<20 + 2, + MaxEncoderHeaderTableSize: 1<<20 + 3, + MaxReadFrameSize: 1<<20 + 4, + MaxReceiveBufferPerConnection: 64<<10 + 5, + MaxReceiveBufferPerStream: 64<<10 + 6, + } + const maxHeaderBytes = 4096 + 7 + st := newServerTester(t, nil, func(s *http.Server) { + s.MaxHeaderBytes = maxHeaderBytes + s.HTTP2 = config + }) + st.writePreface() + st.writeSettings() + st.wantSettings(map[SettingID]uint32{ + SettingMaxConcurrentStreams: uint32(config.MaxConcurrentStreams), + SettingHeaderTableSize: uint32(config.MaxDecoderHeaderTableSize), + SettingInitialWindowSize: uint32(config.MaxReceiveBufferPerStream), + SettingMaxFrameSize: uint32(config.MaxReadFrameSize), + SettingMaxHeaderListSize: maxHeaderBytes + (32 * 10), + }) +} + +func TestConfigTransportSettings(t *testing.T) { + config := &http.HTTP2Config{ + MaxConcurrentStreams: 1, // ignored by Transport + MaxDecoderHeaderTableSize: 1<<20 + 2, + MaxEncoderHeaderTableSize: 1<<20 + 3, + MaxReadFrameSize: 1<<20 + 4, + MaxReceiveBufferPerConnection: 64<<10 + 5, + MaxReceiveBufferPerStream: 64<<10 + 6, + } + const maxHeaderBytes = 4096 + 7 + tc := newTestClientConn(t, func(tr *http.Transport) { + tr.HTTP2 = config + tr.MaxResponseHeaderBytes = maxHeaderBytes + }) + tc.wantSettings(map[SettingID]uint32{ + SettingHeaderTableSize: uint32(config.MaxDecoderHeaderTableSize), + SettingInitialWindowSize: uint32(config.MaxReceiveBufferPerStream), + SettingMaxFrameSize: uint32(config.MaxReadFrameSize), + SettingMaxHeaderListSize: maxHeaderBytes + (32 * 10), + }) + tc.wantWindowUpdate(0, uint32(config.MaxReceiveBufferPerConnection)) +} + +func TestConfigPingTimeoutServer(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(s *Server) { + s.ReadIdleTimeout = 2 * time.Second + s.PingTimeout = 3 * time.Second + }) + st.greet() + + st.advance(2 * time.Second) + _ = readFrame[*PingFrame](t, st) + st.advance(3 * time.Second) + st.wantClosed() +} + +func TestConfigPingTimeoutTransport(t *testing.T) { + tc := newTestClientConn(t, func(tr *Transport) { + tr.ReadIdleTimeout = 2 * time.Second + tr.PingTimeout = 3 * time.Second + }) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + tc.wantFrameType(FrameHeaders) + + tc.advance(2 * time.Second) + tc.wantFrameType(FramePing) + tc.advance(3 * time.Second) + err := rt.err() + if err == nil { + t.Fatalf("expected connection to close") + } +} diff --git a/pkg/http2/connframes_test.go b/pkg/http2/connframes_test.go new file mode 100644 index 0000000..2c45325 --- /dev/null +++ b/pkg/http2/connframes_test.go @@ -0,0 +1,431 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "io" + "net/http" + "os" + "reflect" + "slices" + "testing" + + "golang.org/x/net/http2/hpack" +) + +type testConnFramer struct { + t testing.TB + fr *Framer + dec *hpack.Decoder +} + +// readFrame reads the next frame. +// It returns nil if the conn is closed or no frames are available. 
+func (tf *testConnFramer) readFrame() Frame { + tf.t.Helper() + fr, err := tf.fr.ReadFrame() + if err == io.EOF || err == os.ErrDeadlineExceeded { + return nil + } + if err != nil { + tf.t.Fatalf("ReadFrame: %v", err) + } + return fr +} + +type readFramer interface { + readFrame() Frame +} + +// readFrame reads a frame of a specific type. +func readFrame[T any](t testing.TB, framer readFramer) T { + t.Helper() + var v T + fr := framer.readFrame() + if fr == nil { + t.Fatalf("got no frame, want frame %T", v) + } + v, ok := fr.(T) + if !ok { + t.Fatalf("got frame %T, want %T", fr, v) + } + return v +} + +// wantFrameType reads the next frame. +// It produces an error if the frame type is not the expected value. +func (tf *testConnFramer) wantFrameType(want FrameType) { + tf.t.Helper() + fr := tf.readFrame() + if fr == nil { + tf.t.Fatalf("got no frame, want frame %v", want) + } + if got := fr.Header().Type; got != want { + tf.t.Fatalf("got frame %v, want %v", got, want) + } +} + +// wantUnorderedFrames reads frames until every condition in want has been satisfied. +// +// want is a list of func(*SomeFrame) bool. +// wantUnorderedFrames will call each func with frames of the appropriate type +// until the func returns true. +// It calls t.Fatal if an unexpected frame is received (no func has that frame type, +// or all funcs with that type have returned true), or if the framer runs out of frames +// with unsatisfied funcs. +// +// Example: +// +// // Read a SETTINGS frame, and any number of DATA frames for a stream. +// // The SETTINGS frame may appear anywhere in the sequence. +// // The last DATA frame must indicate the end of the stream. +// tf.wantUnorderedFrames( +// func(f *SettingsFrame) bool { +// return true +// }, +// func(f *DataFrame) bool { +// return f.StreamEnded() +// }, +// ) +func (tf *testConnFramer) wantUnorderedFrames(want ...any) { + tf.t.Helper() + want = slices.Clone(want) + seen := 0 +frame: + for seen < len(want) && !tf.t.Failed() { + fr := tf.readFrame() + if fr == nil { + break + } + for i, f := range want { + if f == nil { + continue + } + typ := reflect.TypeOf(f) + if typ.Kind() != reflect.Func || + typ.NumIn() != 1 || + typ.NumOut() != 1 || + typ.Out(0) != reflect.TypeOf(true) { + tf.t.Fatalf("expected func(*SomeFrame) bool, got %T", f) + } + if typ.In(0) == reflect.TypeOf(fr) { + out := reflect.ValueOf(f).Call([]reflect.Value{reflect.ValueOf(fr)}) + if out[0].Bool() { + want[i] = nil + seen++ + } + continue frame + } + } + tf.t.Errorf("got unexpected frame type %T", fr) + } + if seen < len(want) { + for _, f := range want { + if f == nil { + continue + } + tf.t.Errorf("did not see expected frame: %v", reflect.TypeOf(f).In(0)) + } + tf.t.Fatalf("did not see %v expected frame types", len(want)-seen) + } +} + +type wantHeader struct { + streamID uint32 + endStream bool + header http.Header +} + +// wantHeaders reads a HEADERS frame and potential CONTINUATION frames, +// and asserts that they contain the expected headers. 
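+//
+// Only the header fields listed in want.header are checked (extra fields
+// sent by the peer are ignored); for example:
+//
+//	tf.wantHeaders(wantHeader{
+//		streamID:  1,
+//		endStream: true,
+//		header:    http.Header{":status": []string{"200"}},
+//	})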
+func (tf *testConnFramer) wantHeaders(want wantHeader) { + tf.t.Helper() + + hf := readFrame[*HeadersFrame](tf.t, tf) + if got, want := hf.StreamID, want.streamID; got != want { + tf.t.Fatalf("got stream ID %v, want %v", got, want) + } + if got, want := hf.StreamEnded(), want.endStream; got != want { + tf.t.Fatalf("got stream ended %v, want %v", got, want) + } + + gotHeader := make(http.Header) + tf.dec.SetEmitFunc(func(hf hpack.HeaderField) { + gotHeader[hf.Name] = append(gotHeader[hf.Name], hf.Value) + }) + defer tf.dec.SetEmitFunc(nil) + if _, err := tf.dec.Write(hf.HeaderBlockFragment()); err != nil { + tf.t.Fatalf("decoding HEADERS frame: %v", err) + } + headersEnded := hf.HeadersEnded() + for !headersEnded { + cf := readFrame[*ContinuationFrame](tf.t, tf) + if cf == nil { + tf.t.Fatalf("got end of frames, want CONTINUATION") + } + if _, err := tf.dec.Write(cf.HeaderBlockFragment()); err != nil { + tf.t.Fatalf("decoding CONTINUATION frame: %v", err) + } + headersEnded = cf.HeadersEnded() + } + if err := tf.dec.Close(); err != nil { + tf.t.Fatalf("hpack decoding error: %v", err) + } + + for k, v := range want.header { + if !reflect.DeepEqual(v, gotHeader[k]) { + tf.t.Fatalf("got header %q = %q; want %q", k, v, gotHeader[k]) + } + } +} + +// decodeHeader supports some older server tests. +// TODO: rewrite those tests to use newer, more convenient test APIs. +func (tf *testConnFramer) decodeHeader(headerBlock []byte) (pairs [][2]string) { + tf.dec.SetEmitFunc(func(hf hpack.HeaderField) { + if hf.Name == "date" { + return + } + pairs = append(pairs, [2]string{hf.Name, hf.Value}) + }) + defer tf.dec.SetEmitFunc(nil) + if _, err := tf.dec.Write(headerBlock); err != nil { + tf.t.Fatalf("hpack decoding error: %v", err) + } + if err := tf.dec.Close(); err != nil { + tf.t.Fatalf("hpack decoding error: %v", err) + } + return pairs +} + +type wantData struct { + streamID uint32 + endStream bool + size int + data []byte + multiple bool // data may be spread across multiple DATA frames +} + +// wantData reads zero or more DATA frames, and asserts that they match the expectation. +func (tf *testConnFramer) wantData(want wantData) { + tf.t.Helper() + gotSize := 0 + gotEndStream := false + if want.data != nil { + want.size = len(want.data) + } + var gotData []byte + for { + fr := tf.readFrame() + if fr == nil { + break + } + data, ok := fr.(*DataFrame) + if !ok { + tf.t.Fatalf("got frame %T, want DataFrame", fr) + } + if want.data != nil { + gotData = append(gotData, data.Data()...) 
+ } + gotSize += len(data.Data()) + if data.StreamEnded() { + gotEndStream = true + break + } + if !want.endStream && gotSize >= want.size { + break + } + if !want.multiple { + break + } + } + if gotSize != want.size { + tf.t.Fatalf("got %v bytes of DATA frames, want %v", gotSize, want.size) + } + if gotEndStream != want.endStream { + tf.t.Fatalf("after %v bytes of DATA frames, got END_STREAM=%v; want %v", gotSize, gotEndStream, want.endStream) + } + if want.data != nil && !bytes.Equal(gotData, want.data) { + tf.t.Fatalf("got data %q, want %q", gotData, want.data) + } +} + +func (tf *testConnFramer) wantRSTStream(streamID uint32, code ErrCode) { + tf.t.Helper() + fr := readFrame[*RSTStreamFrame](tf.t, tf) + if fr.StreamID != streamID || fr.ErrCode != code { + tf.t.Fatalf("got %v, want RST_STREAM StreamID=%v, code=%v", summarizeFrame(fr), streamID, code) + } +} + +func (tf *testConnFramer) wantSettings(want map[SettingID]uint32) { + fr := readFrame[*SettingsFrame](tf.t, tf) + if fr.Header().Flags.Has(FlagSettingsAck) { + tf.t.Errorf("got SETTINGS frame with ACK set, want no ACK") + } + for wantID, wantVal := range want { + gotVal, ok := fr.Value(wantID) + if !ok { + tf.t.Errorf("SETTINGS: %v is not set, want %v", wantID, wantVal) + } else if gotVal != wantVal { + tf.t.Errorf("SETTINGS: %v is %v, want %v", wantID, gotVal, wantVal) + } + } + if tf.t.Failed() { + tf.t.Fatalf("%v", fr) + } +} + +func (tf *testConnFramer) wantSettingsAck() { + tf.t.Helper() + fr := readFrame[*SettingsFrame](tf.t, tf) + if !fr.Header().Flags.Has(FlagSettingsAck) { + tf.t.Fatal("Settings Frame didn't have ACK set") + } +} + +func (tf *testConnFramer) wantGoAway(maxStreamID uint32, code ErrCode) { + tf.t.Helper() + fr := readFrame[*GoAwayFrame](tf.t, tf) + if fr.LastStreamID != maxStreamID || fr.ErrCode != code { + tf.t.Fatalf("got %v, want GOAWAY LastStreamID=%v, code=%v", summarizeFrame(fr), maxStreamID, code) + } +} + +func (tf *testConnFramer) wantWindowUpdate(streamID, incr uint32) { + tf.t.Helper() + wu := readFrame[*WindowUpdateFrame](tf.t, tf) + if wu.FrameHeader.StreamID != streamID { + tf.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID) + } + if wu.Increment != incr { + tf.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) + } +} + +func (tf *testConnFramer) wantClosed() { + tf.t.Helper() + fr, err := tf.fr.ReadFrame() + if err == nil { + tf.t.Fatalf("got unexpected frame (want closed connection): %v", fr) + } + if err == os.ErrDeadlineExceeded { + tf.t.Fatalf("connection is not closed; want it to be") + } +} + +func (tf *testConnFramer) wantIdle() { + tf.t.Helper() + fr, err := tf.fr.ReadFrame() + if err == nil { + tf.t.Fatalf("got unexpected frame (want idle connection): %v", fr) + } + if err != os.ErrDeadlineExceeded { + tf.t.Fatalf("got unexpected frame error (want idle connection): %v", err) + } +} + +func (tf *testConnFramer) writeSettings(settings ...Setting) { + tf.t.Helper() + if err := tf.fr.WriteSettings(settings...); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writeSettingsAck() { + tf.t.Helper() + if err := tf.fr.WriteSettingsAck(); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writeData(streamID uint32, endStream bool, data []byte) { + tf.t.Helper() + if err := tf.fr.WriteData(streamID, endStream, data); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { + tf.t.Helper() + if err := tf.fr.WriteDataPadded(streamID, 
endStream, data, pad); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writeHeaders(p HeadersFrameParam) { + tf.t.Helper() + if err := tf.fr.WriteHeaders(p); err != nil { + tf.t.Fatal(err) + } +} + +// writeHeadersMode writes header frames, as modified by mode: +// +// - noHeader: Don't write the header. +// - oneHeader: Write a single HEADERS frame. +// - splitHeader: Write a HEADERS frame and CONTINUATION frame. +func (tf *testConnFramer) writeHeadersMode(mode headerType, p HeadersFrameParam) { + tf.t.Helper() + switch mode { + case noHeader: + case oneHeader: + tf.writeHeaders(p) + case splitHeader: + if len(p.BlockFragment) < 2 { + panic("too small") + } + contData := p.BlockFragment[1:] + contEnd := p.EndHeaders + p.BlockFragment = p.BlockFragment[:1] + p.EndHeaders = false + tf.writeHeaders(p) + tf.writeContinuation(p.StreamID, contEnd, contData) + default: + panic("bogus mode") + } +} + +func (tf *testConnFramer) writeContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) { + tf.t.Helper() + if err := tf.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writePriority(id uint32, p PriorityParam) { + if err := tf.fr.WritePriority(id, p); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writeRSTStream(streamID uint32, code ErrCode) { + tf.t.Helper() + if err := tf.fr.WriteRSTStream(streamID, code); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writePing(ack bool, data [8]byte) { + tf.t.Helper() + if err := tf.fr.WritePing(ack, data); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writeGoAway(maxStreamID uint32, code ErrCode, debugData []byte) { + tf.t.Helper() + if err := tf.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { + tf.t.Fatal(err) + } +} + +func (tf *testConnFramer) writeWindowUpdate(streamID, incr uint32) { + tf.t.Helper() + if err := tf.fr.WriteWindowUpdate(streamID, incr); err != nil { + tf.t.Fatal(err) + } +} diff --git a/pkg/http2/frame.go b/pkg/http2/frame.go index e2b298d..81faec7 100644 --- a/pkg/http2/frame.go +++ b/pkg/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1487,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1495,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. 
-func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1564,6 +1567,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,8 +1580,38 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return mh, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if hc.HeadersEnded() { @@ -1594,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { mh.HeadersFrame.invalidate() if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) + return mh, ConnectionError(ErrCodeCompression) } if invalid != nil { fr.errDetail = invalid diff --git a/pkg/http2/gate_test.go b/pkg/http2/gate_test.go new file mode 100644 index 0000000..e5e6a31 --- /dev/null +++ b/pkg/http2/gate_test.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "context" + +// An gate is a monitor (mutex + condition variable) with one bit of state. +// +// The condition may be either set or unset. +// Lock operations may be unconditional, or wait for the condition to be set. +// Unlock operations record the new state of the condition. +type gate struct { + // When unlocked, exactly one of set or unset contains a value. + // When locked, neither chan contains a value. + set chan struct{} + unset chan struct{} +} + +// newGate returns a new, unlocked gate with the condition unset. +func newGate() gate { + g := newLockedGate() + g.unlock(false) + return g +} + +// newLocked gate returns a new, locked gate. +func newLockedGate() gate { + return gate{ + set: make(chan struct{}, 1), + unset: make(chan struct{}, 1), + } +} + +// lock acquires the gate unconditionally. 
+// It reports whether the condition is set. +func (g *gate) lock() (set bool) { + select { + case <-g.set: + return true + case <-g.unset: + return false + } +} + +// waitAndLock waits until the condition is set before acquiring the gate. +// If the context expires, waitAndLock returns an error and does not acquire the gate. +func (g *gate) waitAndLock(ctx context.Context) error { + select { + case <-g.set: + return nil + default: + } + select { + case <-g.set: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// lockIfSet acquires the gate if and only if the condition is set. +func (g *gate) lockIfSet() (acquired bool) { + select { + case <-g.set: + return true + default: + return false + } +} + +// unlock sets the condition and releases the gate. +func (g *gate) unlock(set bool) { + if set { + g.set <- struct{}{} + } else { + g.unset <- struct{}{} + } +} + +// unlock sets the condition to the result of f and releases the gate. +// Useful in defers. +func (g *gate) unlockFunc(f func() bool) { + g.unlock(f()) +} diff --git a/pkg/http2/h2c/h2c_test.go b/pkg/http2/h2c/h2c_test.go index 038cbc3..3e78f29 100644 --- a/pkg/http2/h2c/h2c_test.go +++ b/pkg/http2/h2c/h2c_test.go @@ -9,7 +9,6 @@ import ( "crypto/tls" "fmt" "io" - "io/ioutil" "log" "net" "net/http" @@ -68,7 +67,7 @@ func TestContext(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil { t.Fatal(err) } @@ -162,7 +161,7 @@ func TestMaxBytesHandler(t *testing.T) { t.Fatal(err) } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil { t.Fatal(err) } diff --git a/pkg/http2/hpack/gen.go b/pkg/http2/hpack/gen.go index 21a4198..0efa8e5 100644 --- a/pkg/http2/hpack/gen.go +++ b/pkg/http2/hpack/gen.go @@ -10,7 +10,6 @@ import ( "bytes" "fmt" "go/format" - "io/ioutil" "os" "sort" @@ -176,7 +175,7 @@ func genFile(name string, buf *bytes.Buffer) { fmt.Fprintln(os.Stderr, err) os.Exit(1) } - if err := ioutil.WriteFile(name, b, 0644); err != nil { + if err := os.WriteFile(name, b, 0644); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } diff --git a/pkg/http2/http2.go b/pkg/http2/http2.go index 6f2df28..c7601c9 100644 --- a/pkg/http2/http2.go +++ b/pkg/http2/http2.go @@ -17,24 +17,28 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -47,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -138,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -147,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize 
SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { @@ -210,12 +223,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -241,13 +248,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -274,7 +287,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -292,6 +305,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. 
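+			// Clearing the deadline keeps it from affecting later
+			// writes on the same conn.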
+ conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") @@ -383,3 +428,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/pkg/http2/http2_test.go b/pkg/http2/http2_test.go index a16774b..b1e71f1 100644 --- a/pkg/http2/http2_test.go +++ b/pkg/http2/http2_test.go @@ -8,7 +8,6 @@ import ( "bytes" "flag" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -266,7 +265,7 @@ func TestNoUnicodeStrings(t *testing.T) { return nil } - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) if err != nil { t.Fatal(err) } @@ -284,3 +283,11 @@ func TestNoUnicodeStrings(t *testing.T) { t.Fatal(err) } } + +// must returns v if err is nil, or panics otherwise. +func must[T any](v T, err error) T { + if err != nil { + panic(err) + } + return v +} diff --git a/pkg/http2/netconn_test.go b/pkg/http2/netconn_test.go new file mode 100644 index 0000000..0f1b5fb --- /dev/null +++ b/pkg/http2/netconn_test.go @@ -0,0 +1,356 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "context" + "errors" + "io" + "math" + "net" + "net/netip" + "os" + "sync" + "time" +) + +// synctestNetPipe creates an in-memory, full duplex network connection. +// Read and write timeouts are managed by the synctest group. +// +// Unlike net.Pipe, the connection is not synchronous. +// Writes are made to a buffer, and return immediately. +// By default, the buffer size is unlimited. +func synctestNetPipe(group *synctestGroup) (r, w *synctestNetConn) { + s1addr := net.TCPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:8000")) + s2addr := net.TCPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:8001")) + s1 := newSynctestNetConnHalf(s1addr) + s2 := newSynctestNetConnHalf(s2addr) + r = &synctestNetConn{group: group, loc: s1, rem: s2} + w = &synctestNetConn{group: group, loc: s2, rem: s1} + r.peer = w + w.peer = r + return r, w +} + +// A synctestNetConn is one endpoint of the connection created by synctestNetPipe. +type synctestNetConn struct { + group *synctestGroup + + // local and remote connection halves. + // Each half contains a buffer. + // Reads pull from the local buffer, and writes push to the remote buffer. + loc, rem *synctestNetConnHalf + + // When set, group.Wait is automatically called before reads and after writes. + autoWait bool + + // peer is the other endpoint. + peer *synctestNetConn +} + +// Read reads data from the connection. +func (c *synctestNetConn) Read(b []byte) (n int, err error) { + if c.autoWait { + c.group.Wait() + } + return c.loc.read(b) +} + +// Peek returns the available unread read buffer, +// without consuming its contents. 
+func (c *synctestNetConn) Peek() []byte {
+	if c.autoWait {
+		c.group.Wait()
+	}
+	return c.loc.peek()
+}
+
+// Write writes data to the connection.
+func (c *synctestNetConn) Write(b []byte) (n int, err error) {
+	if c.autoWait {
+		defer c.group.Wait()
+	}
+	return c.rem.write(b)
+}
+
+// IsClosedByPeer reports whether the peer has closed its end of the connection.
+func (c *synctestNetConn) IsClosedByPeer() bool {
+	if c.autoWait {
+		c.group.Wait()
+	}
+	return c.loc.isClosedByPeer()
+}
+
+// Close closes the connection.
+func (c *synctestNetConn) Close() error {
+	c.loc.setWriteError(errors.New("connection closed by peer"))
+	c.rem.setReadError(io.EOF)
+	if c.autoWait {
+		c.group.Wait()
+	}
+	return nil
+}
+
+// LocalAddr returns the (fake) local network address.
+func (c *synctestNetConn) LocalAddr() net.Addr {
+	return c.loc.addr
+}
+
+// RemoteAddr returns the (fake) remote network address.
+func (c *synctestNetConn) RemoteAddr() net.Addr {
+	return c.rem.addr
+}
+
+// SetDeadline sets the read and write deadlines for the connection.
+func (c *synctestNetConn) SetDeadline(t time.Time) error {
+	c.SetReadDeadline(t)
+	c.SetWriteDeadline(t)
+	return nil
+}
+
+// SetReadDeadline sets the read deadline for the connection.
+func (c *synctestNetConn) SetReadDeadline(t time.Time) error {
+	c.loc.rctx.setDeadline(c.group, t)
+	return nil
+}
+
+// SetWriteDeadline sets the write deadline for the connection.
+func (c *synctestNetConn) SetWriteDeadline(t time.Time) error {
+	c.rem.wctx.setDeadline(c.group, t)
+	return nil
+}
+
+// SetReadBufferSize sets the read buffer limit for the connection.
+// Writes by the peer will block so long as the buffer is full.
+func (c *synctestNetConn) SetReadBufferSize(size int) {
+	c.loc.setReadBufferSize(size)
+}
+
+// synctestNetConnHalf is one data flow in the connection created by synctestNetPipe.
+// Each half contains a buffer. Writes to the half push to the buffer, and reads pull from it.
+type synctestNetConnHalf struct {
+	addr net.Addr
+
+	// Read and write timeouts.
+	rctx, wctx deadlineContext
+
+	// A half can be readable and/or writable.
+	//
+	// These four channels act as a lock,
+	// and allow waiting for readability/writability.
+	// When the half is unlocked, exactly one channel contains a value.
+	// When the half is locked, all channels are empty.
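+	//
+	// For example, a half whose buffer holds data and still has spare
+	// capacity parks its token in lockrw; once the buffer fills (and no
+	// write error is set), unlock moves the token to lockr until a read
+	// frees space.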
+ lockr chan struct{} // readable + lockw chan struct{} // writable + lockrw chan struct{} // readable and writable + lockc chan struct{} // neither readable nor writable + + bufMax int // maximum buffer size + buf bytes.Buffer + readErr error // error returned by reads + writeErr error // error returned by writes +} + +func newSynctestNetConnHalf(addr net.Addr) *synctestNetConnHalf { + h := &synctestNetConnHalf{ + addr: addr, + lockw: make(chan struct{}, 1), + lockr: make(chan struct{}, 1), + lockrw: make(chan struct{}, 1), + lockc: make(chan struct{}, 1), + bufMax: math.MaxInt, // unlimited + } + h.unlock() + return h +} + +func (h *synctestNetConnHalf) lock() { + select { + case <-h.lockw: + case <-h.lockr: + case <-h.lockrw: + case <-h.lockc: + } +} + +func (h *synctestNetConnHalf) unlock() { + canRead := h.readErr != nil || h.buf.Len() > 0 + canWrite := h.writeErr != nil || h.bufMax > h.buf.Len() + switch { + case canRead && canWrite: + h.lockrw <- struct{}{} + case canRead: + h.lockr <- struct{}{} + case canWrite: + h.lockw <- struct{}{} + default: + h.lockc <- struct{}{} + } +} + +func (h *synctestNetConnHalf) readWaitAndLock() error { + select { + case <-h.lockr: + return nil + case <-h.lockrw: + return nil + default: + } + ctx := h.rctx.context() + select { + case <-h.lockr: + return nil + case <-h.lockrw: + return nil + case <-ctx.Done(): + return context.Cause(ctx) + } +} + +func (h *synctestNetConnHalf) writeWaitAndLock() error { + select { + case <-h.lockw: + return nil + case <-h.lockrw: + return nil + default: + } + ctx := h.wctx.context() + select { + case <-h.lockw: + return nil + case <-h.lockrw: + return nil + case <-ctx.Done(): + return context.Cause(ctx) + } +} + +func (h *synctestNetConnHalf) peek() []byte { + h.lock() + defer h.unlock() + return h.buf.Bytes() +} + +func (h *synctestNetConnHalf) isClosedByPeer() bool { + h.lock() + defer h.unlock() + return h.readErr != nil +} + +func (h *synctestNetConnHalf) read(b []byte) (n int, err error) { + if err := h.readWaitAndLock(); err != nil { + return 0, err + } + defer h.unlock() + if h.buf.Len() == 0 && h.readErr != nil { + return 0, h.readErr + } + return h.buf.Read(b) +} + +func (h *synctestNetConnHalf) setReadBufferSize(size int) { + h.lock() + defer h.unlock() + h.bufMax = size +} + +func (h *synctestNetConnHalf) write(b []byte) (n int, err error) { + for n < len(b) { + nn, err := h.writePartial(b[n:]) + n += nn + if err != nil { + return n, err + } + } + return n, nil +} + +func (h *synctestNetConnHalf) writePartial(b []byte) (n int, err error) { + if err := h.writeWaitAndLock(); err != nil { + return 0, err + } + defer h.unlock() + if h.writeErr != nil { + return 0, h.writeErr + } + writeMax := h.bufMax - h.buf.Len() + if writeMax < len(b) { + b = b[:writeMax] + } + return h.buf.Write(b) +} + +func (h *synctestNetConnHalf) setReadError(err error) { + h.lock() + defer h.unlock() + if h.readErr == nil { + h.readErr = err + } +} + +func (h *synctestNetConnHalf) setWriteError(err error) { + h.lock() + defer h.unlock() + if h.writeErr == nil { + h.writeErr = err + } +} + +// deadlineContext converts a changable deadline (as in net.Conn.SetDeadline) into a Context. +type deadlineContext struct { + mu sync.Mutex + ctx context.Context + cancel context.CancelCauseFunc + timer timer +} + +// context returns a Context which expires when the deadline does. 
+func (t *deadlineContext) context() context.Context { + t.mu.Lock() + defer t.mu.Unlock() + if t.ctx == nil { + t.ctx, t.cancel = context.WithCancelCause(context.Background()) + } + return t.ctx +} + +// setDeadline sets the current deadline. +func (t *deadlineContext) setDeadline(group *synctestGroup, deadline time.Time) { + t.mu.Lock() + defer t.mu.Unlock() + // If t.ctx is non-nil and t.cancel is nil, then t.ctx was canceled + // and we should create a new one. + if t.ctx == nil || t.cancel == nil { + t.ctx, t.cancel = context.WithCancelCause(context.Background()) + } + // Stop any existing deadline from expiring. + if t.timer != nil { + t.timer.Stop() + } + if deadline.IsZero() { + // No deadline. + return + } + if !deadline.After(group.Now()) { + // Deadline has already expired. + t.cancel(os.ErrDeadlineExceeded) + t.cancel = nil + return + } + if t.timer != nil { + // Reuse existing deadline timer. + t.timer.Reset(deadline.Sub(group.Now())) + return + } + // Create a new timer to cancel the context at the deadline. + t.timer = group.AfterFunc(deadline.Sub(group.Now()), func() { + t.mu.Lock() + defer t.mu.Unlock() + t.cancel(os.ErrDeadlineExceeded) + t.cancel = nil + }) +} diff --git a/pkg/http2/pipe.go b/pkg/http2/pipe.go index 684d984..3b9f06b 100644 --- a/pkg/http2/pipe.go +++ b/pkg/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/pkg/http2/pipe_test.go b/pkg/http2/pipe_test.go index 67562a9..326b94d 100644 --- a/pkg/http2/pipe_test.go +++ b/pkg/http2/pipe_test.go @@ -8,7 +8,6 @@ import ( "bytes" "errors" "io" - "io/ioutil" "testing" ) @@ -85,7 +84,7 @@ func TestPipeCloseWithError(t *testing.T) { io.WriteString(p, body) a := errors.New("test error") p.CloseWithError(a) - all, err := ioutil.ReadAll(p) + all, err := io.ReadAll(p) if string(all) != body { t.Errorf("read bytes = %q; want %q", all, body) } @@ -112,7 +111,7 @@ func TestPipeBreakWithError(t *testing.T) { io.WriteString(p, "foo") a := errors.New("test err") p.BreakWithError(a) - all, err := ioutil.ReadAll(p) + all, err := io.ReadAll(p) if string(all) != "" { t.Errorf("read bytes = %q; want empty string", all) } diff --git a/pkg/http2/server.go b/pkg/http2/server.go index fbc2081..6be13b4 100644 --- a/pkg/http2/server.go +++ b/pkg/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -53,10 +54,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? 
+ prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -125,8 +130,25 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -154,57 +176,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState -} - -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface } -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() } - return defaultMaxReadFrameSize } -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() } - return defaultMaxStreams + return time.Now() } -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) } - return initialHeaderTableSize + return timeTimer{time.NewTimer(d)} } -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
+func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. - return maxQueuedControlFrames + return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { @@ -303,7 +307,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -320,12 +324,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. + s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -400,16 +423,22 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. 
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -419,13 +448,19 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -435,7 +470,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -451,15 +486,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -492,7 +527,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -529,7 +564,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -569,6 +604,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -588,6 +624,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -598,9 +635,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -615,11 +657,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -649,12 +687,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -732,11 +770,7 @@ func isClosedConnError(err error) bool { return false } - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { + if errors.Is(err, net.ErrClosed) { return true } @@ -815,8 +849,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. 
func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -847,6 +882,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -885,7 +921,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -897,20 +933,24 @@ func (sc *serverConn) serve() { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, + write: settings, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. - if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -925,16 +965,23 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + if sc.srv.IdleTimeout > 0 { + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -948,6 +995,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. 
if sc.writingFrameAsync { @@ -979,6 +1027,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1001,7 +1051,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1017,12 +1067,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1030,6 +1107,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1061,10 +1139,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? 
defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1282,6 +1360,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1429,7 +1511,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1482,6 +1564,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { sc.goAway(ErrCodeFlowControl) return true case ConnectionError: + if res.f != nil { + if id := res.f.Header().StreamID; id > sc.maxClientStreamID { + sc.maxClientStreamID = id + } + } sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) sc.goAway(ErrCode(ev)) return true // goAway will handle shutdown @@ -1591,6 +1678,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -1678,7 +1770,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1700,6 +1792,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -1753,6 +1846,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2058,9 +2154,9 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2079,7 +2175,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. 
- if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2156,9 +2252,9 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.inflow.init(sc.initialStreamRecvWindowSize) + if sc.hs.WriteTimeout > 0 { + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2183,11 +2279,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2211,6 +2313,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2237,6 +2342,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2278,7 +2384,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { @@ -2382,6 +2488,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2678,7 +2785,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2800,7 +2907,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. 
st.onReadTimeout() @@ -2816,9 +2923,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2826,7 +2933,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2842,14 +2949,19 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3296,7 +3408,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/pkg/http2/server_push_test.go b/pkg/http2/server_push_test.go index cda8f43..69e4c3b 100644 --- a/pkg/http2/server_push_test.go +++ b/pkg/http2/server_push_test.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "reflect" "runtime" @@ -40,7 +39,7 @@ func TestServer_Push_Success(t *testing.T) { if r.Body == nil { return fmt.Errorf("nil Body") } - if buf, err := ioutil.ReadAll(r.Body); err != nil || len(buf) != 0 { + if buf, err := io.ReadAll(r.Body); err != nil || len(buf) != 0 { return fmt.Errorf("ReadAll(Body)=%q,%v, want '',nil", buf, err) } return nil @@ -106,7 +105,7 @@ func TestServer_Push_Success(t *testing.T) { errc <- fmt.Errorf("unknown RequestURL %q", r.URL.RequestURI()) } }) - stURL = st.ts.URL + stURL = "https://" + st.authority() // Send one request, which should push two responses. 
st.greet() @@ -170,7 +169,7 @@ func TestServer_Push_Success(t *testing.T) { return checkPushPromise(f, 2, [][2]string{ {":method", "GET"}, {":scheme", "https"}, - {":authority", st.ts.Listener.Addr().String()}, + {":authority", st.authority()}, {":path", "/pushed?get"}, {"user-agent", userAgent}, }) @@ -179,7 +178,7 @@ func TestServer_Push_Success(t *testing.T) { return checkPushPromise(f, 4, [][2]string{ {":method", "HEAD"}, {":scheme", "https"}, - {":authority", st.ts.Listener.Addr().String()}, + {":authority", st.authority()}, {":path", "/pushed?head"}, {"cookie", cookie}, {"user-agent", userAgent}, @@ -219,12 +218,12 @@ func TestServer_Push_Success(t *testing.T) { consumed := map[uint32]int{} for k := 0; len(expected) > 0; k++ { - f, err := st.readFrame() - if err != nil { + f := st.readFrame() + if f == nil { for id, left := range expected { t.Errorf("stream %d: missing %d frames", id, len(left)) } - t.Fatalf("readFrame %d: %v", k, err) + break } id := f.Header().StreamID label := fmt.Sprintf("stream %d, frame %d", id, consumed[id]) @@ -340,10 +339,10 @@ func testServer_Push_RejectSingleRequest(t *testing.T, doPush func(http.Pusher, t.Error(err) } // Should not get a PUSH_PROMISE frame. - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Error("stream should end after headers") - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) } func TestServer_Push_RejectIfDisabled(t *testing.T) { @@ -460,7 +459,7 @@ func TestServer_Push_StateTransitions(t *testing.T) { } getSlash(st) // After the PUSH_PROMISE is sent, the stream should be stateHalfClosedRemote. - st.wantPushPromise() + _ = readFrame[*PushPromiseFrame](t, st) if got, want := st.streamState(2), stateHalfClosedRemote; got != want { t.Fatalf("streamState(2)=%v, want %v", got, want) } @@ -469,10 +468,10 @@ func TestServer_Push_StateTransitions(t *testing.T) { // the stream before we check st.streamState(2) -- should that happen, we'll // see stateClosed and fail the above check. 
close(gotPromise) - st.wantHeaders() - if df := st.wantData(); !df.StreamEnded() { - t.Fatal("expected END_STREAM flag on DATA") - } + st.wantHeaders(wantHeader{ + streamID: 2, + endStream: false, + }) if got, want := st.streamState(2), stateClosed; got != want { t.Fatalf("streamState(2)=%v, want %v", got, want) } @@ -555,9 +554,9 @@ func TestServer_Push_Underflow(t *testing.T) { numPushPromises := 0 numHeaders := 0 for numHeaders < numRequests*2 || numPushPromises < numRequests { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) + f := st.readFrame() + if f == nil { + st.t.Fatal("conn is idle, want frame") } switch f := f.(type) { case *HeadersFrame: diff --git a/pkg/http2/server_test.go b/pkg/http2/server_test.go index 1fdd191..201cf0d 100644 --- a/pkg/http2/server_test.go +++ b/pkg/http2/server_test.go @@ -14,8 +14,8 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" + "math" "net" "net/http" "net/http/httptest" @@ -38,7 +38,7 @@ func stderrv() io.Writer { return os.Stderr } - return ioutil.Discard + return io.Discard } type safeBuffer struct { @@ -65,16 +65,16 @@ func (sb *safeBuffer) Len() int { } type serverTester struct { - cc net.Conn // client conn - t testing.TB - ts *httptest.Server - fr *Framer - serverLogBuf safeBuffer // logger for httptest.Server - logFilter []string // substrings to filter out - scMu sync.Mutex // guards sc - sc *serverConn - hpackDec *hpack.Decoder - decodedHeaders [][2]string + cc net.Conn // client conn + t testing.TB + group *synctestGroup + h1server *http.Server + h2server *Server + serverLogBuf safeBuffer // logger for httptest.Server + logFilter []string // substrings to filter out + scMu sync.Mutex // guards sc + sc *serverConn + testConnFramer // If http2debug!=2, then we capture Frame debug logs that will be written // to t.Log after a test fails. The read and write logs use separate locks @@ -101,23 +101,153 @@ func resetHooks() { testHookOnPanicMu.Unlock() } +func newTestServer(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *httptest.Server { + ts := httptest.NewUnstartedServer(handler) + ts.EnableHTTP2 = true + ts.Config.ErrorLog = log.New(twriter{t: t}, "", log.LstdFlags) + h2server := new(Server) + for _, opt := range opts { + switch v := opt.(type) { + case func(*httptest.Server): + v(ts) + case func(*http.Server): + v(ts.Config) + case func(*Server): + v(h2server) + default: + t.Fatalf("unknown newTestServer option type %T", v) + } + } + ConfigureServer(ts.Config, h2server) + + // ConfigureServer populates ts.Config.TLSConfig. + // Copy it to ts.TLS as well. + ts.TLS = ts.Config.TLSConfig + + // Go 1.22 changes the default minimum TLS version to TLS 1.2, + // in order to properly test cases where we want to reject low + // TLS versions, we need to explicitly configure the minimum + // version here. 
+ ts.Config.TLSConfig.MinVersion = tls.VersionTLS10 + + ts.StartTLS() + t.Cleanup(func() { + ts.CloseClientConnections() + ts.Close() + }) + + return ts +} + type serverTesterOpt string -var optOnlyServer = serverTesterOpt("only_server") -var optQuiet = serverTesterOpt("quiet_logging") var optFramerReuseFrames = serverTesterOpt("frame_reuse_frames") +var optQuiet = func(server *http.Server) { + server.ErrorLog = log.New(io.Discard, "", 0) +} + func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { + t.Helper() + g := newSynctest(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)) + t.Cleanup(func() { + g.Close(t) + }) + + h1server := &http.Server{} + h2server := &Server{ + group: g, + } + tlsState := tls.ConnectionState{ + Version: tls.VersionTLS13, + ServerName: "go.dev", + CipherSuite: tls.TLS_AES_128_GCM_SHA256, + } + for _, opt := range opts { + switch v := opt.(type) { + case func(*Server): + v(h2server) + case func(*http.Server): + v(h1server) + case func(*tls.ConnectionState): + v(&tlsState) + default: + t.Fatalf("unknown newServerTester option type %T", v) + } + } + ConfigureServer(h1server, h2server) + + cli, srv := synctestNetPipe(g) + cli.SetReadDeadline(g.Now()) + cli.autoWait = true + + st := &serverTester{ + t: t, + cc: cli, + group: g, + h1server: h1server, + h2server: h2server, + } + st.hpackEnc = hpack.NewEncoder(&st.headerBuf) + if h1server.ErrorLog == nil { + h1server.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) + } + + t.Cleanup(func() { + st.Close() + g.AdvanceTime(goAwayTimeout) // give server time to shut down + }) + + connc := make(chan *serverConn) + go func() { + g.Join() + h2server.serveConn(&netConnWithConnectionState{ + Conn: srv, + state: tlsState, + }, &ServeConnOpts{ + Handler: handler, + BaseConfig: h1server, + }, func(sc *serverConn) { + connc <- sc + }) + }() + st.sc = <-connc + + st.fr = NewFramer(st.cc, st.cc) + st.testConnFramer = testConnFramer{ + t: t, + fr: NewFramer(st.cc, st.cc), + dec: hpack.NewDecoder(initialHeaderTableSize, nil), + } + g.Wait() + return st +} + +type netConnWithConnectionState struct { + net.Conn + state tls.ConnectionState +} + +func (c *netConnWithConnectionState) ConnectionState() tls.ConnectionState { + return c.state +} + +// newServerTesterWithRealConn creates a test server listening on a localhost port. +// Mostly superseded by newServerTester, which creates a test server using a fake +// net.Conn and synthetic time. This function is still around because some benchmarks +// rely on it; new tests should use newServerTester. 
+func newServerTesterWithRealConn(t testing.TB, handler http.HandlerFunc, opts ...interface{}) *serverTester { resetHooks() ts := httptest.NewUnstartedServer(handler) + t.Cleanup(ts.Close) tlsConfig := &tls.Config{ InsecureSkipVerify: true, NextProtos: []string{NextProtoTLS}, } - var onlyServer, quiet, framerReuseFrames bool + var framerReuseFrames bool h2server := new(Server) for _, opt := range opts { switch v := opt.(type) { @@ -125,14 +255,12 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} v(tlsConfig) case func(*httptest.Server): v(ts) + case func(*http.Server): + v(ts.Config) case func(*Server): v(h2server) case serverTesterOpt: switch v { - case optOnlyServer: - onlyServer = true - case optQuiet: - quiet = true case optFramerReuseFrames: framerReuseFrames = true } @@ -152,16 +280,12 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} ts.Config.TLSConfig.MinVersion = tls.VersionTLS10 st := &serverTester{ - t: t, - ts: ts, + t: t, } st.hpackEnc = hpack.NewEncoder(&st.headerBuf) - st.hpackDec = hpack.NewDecoder(initialHeaderTableSize, st.onHeaderField) ts.TLS = ts.Config.TLSConfig // the httptest.Server has its own copy of this TLS config - if quiet { - ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) - } else { + if ts.Config.ErrorLog == nil { ts.Config.ErrorLog = log.New(io.MultiWriter(stderrv(), twriter{t: t, st: st}, &st.serverLogBuf), "", log.LstdFlags) } ts.StartTLS() @@ -175,36 +299,54 @@ func newServerTester(t testing.TB, handler http.HandlerFunc, opts ...interface{} st.sc = v } log.SetOutput(io.MultiWriter(stderrv(), twriter{t: t, st: st})) - if !onlyServer { - cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) - if err != nil { - t.Fatal(err) + cc, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + st.cc = cc + st.testConnFramer = testConnFramer{ + t: t, + fr: NewFramer(st.cc, st.cc), + dec: hpack.NewDecoder(initialHeaderTableSize, nil), + } + if framerReuseFrames { + st.fr.SetReuseFrames() + } + if !logFrameReads && !logFrameWrites { + st.fr.debugReadLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameReadLogMu.Lock() + fmt.Fprintf(&st.frameReadLogBuf, m, v...) + st.frameReadLogMu.Unlock() } - st.cc = cc - st.fr = NewFramer(cc, cc) - if framerReuseFrames { - st.fr.SetReuseFrames() - } - if !logFrameReads && !logFrameWrites { - st.fr.debugReadLoggerf = func(m string, v ...interface{}) { - m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" - st.frameReadLogMu.Lock() - fmt.Fprintf(&st.frameReadLogBuf, m, v...) - st.frameReadLogMu.Unlock() - } - st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { - m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" - st.frameWriteLogMu.Lock() - fmt.Fprintf(&st.frameWriteLogBuf, m, v...) - st.frameWriteLogMu.Unlock() - } - st.fr.logReads = true - st.fr.logWrites = true + st.fr.debugWriteLoggerf = func(m string, v ...interface{}) { + m = time.Now().Format("2006-01-02 15:04:05.999999999 ") + strings.TrimPrefix(m, "http2: ") + "\n" + st.frameWriteLogMu.Lock() + fmt.Fprintf(&st.frameWriteLogBuf, m, v...) + st.frameWriteLogMu.Unlock() } + st.fr.logReads = true + st.fr.logWrites = true } return st } +// sync waits for all goroutines to idle. 
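+// It is a no-op for testers created without a synctest group (e.g. by newServerTesterWithRealConn).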
+func (st *serverTester) sync() { + if st.group != nil { + st.group.Wait() + } +} + +// advance advances synthetic time by a duration. +func (st *serverTester) advance(d time.Duration) { + st.group.AdvanceTime(d) +} + +func (st *serverTester) authority() string { + return "dummy.tld" +} + func (st *serverTester) closeConn() { st.scMu.Lock() defer st.scMu.Unlock() @@ -280,7 +422,6 @@ func (st *serverTester) Close() { st.cc.Close() } } - st.ts.Close() if st.cc != nil { st.cc.Close() } @@ -290,13 +431,16 @@ func (st *serverTester) Close() { // greet initiates the client's HTTP/2 connection into a state where // frames may be sent. func (st *serverTester) greet() { + st.t.Helper() st.greetAndCheckSettings(func(Setting) error { return nil }) } func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error) { + st.t.Helper() st.writePreface() - st.writeInitialSettings() - st.wantSettings().ForeachSetting(checkSetting) + st.writeSettings() + st.sync() + readFrame[*SettingsFrame](st.t, st).ForeachSetting(checkSetting) st.writeSettingsAck() // The initial WINDOW_UPDATE and SETTINGS ACK can come in any order. @@ -304,9 +448,9 @@ func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error var gotWindowUpdate bool for i := 0; i < 2; i++ { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) + f := st.readFrame() + if f == nil { + st.t.Fatal("wanted a settings ACK and window update, got none") } switch f := f.(type) { case *SettingsFrame: @@ -319,7 +463,8 @@ func (st *serverTester) greetAndCheckSettings(checkSetting func(s Setting) error if f.FrameHeader.StreamID != 0 { st.t.Fatalf("WindowUpdate StreamID = %d; want 0", f.FrameHeader.StreamID) } - incr := uint32(st.sc.srv.initialConnRecvWindowSize() - initialWindowSize) + conf := configFromServer(st.sc.hs, st.sc.srv) + incr := uint32(conf.MaxUploadBufferPerConnection - initialWindowSize) if f.Increment != incr { st.t.Fatalf("WindowUpdate increment = %d; want %d", f.Increment, incr) } @@ -348,34 +493,6 @@ func (st *serverTester) writePreface() { } } -func (st *serverTester) writeInitialSettings() { - if err := st.fr.WriteSettings(); err != nil { - if runtime.GOOS == "openbsd" && strings.HasSuffix(err.Error(), "write: broken pipe") { - st.t.Logf("Error writing initial SETTINGS frame from client to server: %v", err) - st.t.Skipf("Skipping test with known OpenBSD failure mode. 
(See https://go.dev/issue/52208.)") - } - st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) - } -} - -func (st *serverTester) writeSettingsAck() { - if err := st.fr.WriteSettingsAck(); err != nil { - st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) - } -} - -func (st *serverTester) writeHeaders(p HeadersFrameParam) { - if err := st.fr.WriteHeaders(p); err != nil { - st.t.Fatalf("Error writing HEADERS: %v", err) - } -} - -func (st *serverTester) writePriority(id uint32, p PriorityParam) { - if err := st.fr.WritePriority(id, p); err != nil { - st.t.Fatalf("Error writing PRIORITY: %v", err) - } -} - func (st *serverTester) encodeHeaderField(k, v string) { err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) if err != nil { @@ -409,7 +526,7 @@ func (st *serverTester) encodeHeader(headers ...string) []byte { } st.headerBuf.Reset() - defaultAuthority := st.ts.Listener.Addr().String() + defaultAuthority := st.authority() if len(headers) == 0 { // Fast path, mostly for benchmarks, so test code doesn't pollute @@ -474,150 +591,13 @@ func (st *serverTester) bodylessReq1(headers ...string) { }) } -func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { - if err := st.fr.WriteData(streamID, endStream, data); err != nil { - st.t.Fatalf("Error writing DATA: %v", err) - } -} - -func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, pad []byte) { - if err := st.fr.WriteDataPadded(streamID, endStream, data, pad); err != nil { - st.t.Fatalf("Error writing DATA: %v", err) - } -} - -// writeReadPing sends a PING and immediately reads the PING ACK. -// It will fail if any other unread data was pending on the connection. -func (st *serverTester) writeReadPing() { - data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} - if err := st.fr.WritePing(false, data); err != nil { - st.t.Fatalf("Error writing PING: %v", err) - } - p := st.wantPing() - if p.Flags&FlagPingAck == 0 { - st.t.Fatalf("got a PING, want a PING ACK") - } - if p.Data != data { - st.t.Fatalf("got PING data = %x, want %x", p.Data, data) - } -} - -func (st *serverTester) readFrame() (Frame, error) { - return st.fr.ReadFrame() -} - -func (st *serverTester) wantHeaders() *HeadersFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a HEADERS frame: %v", err) - } - hf, ok := f.(*HeadersFrame) - if !ok { - st.t.Fatalf("got a %T; want *HeadersFrame", f) - } - return hf -} - -func (st *serverTester) wantContinuation() *ContinuationFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a CONTINUATION frame: %v", err) - } - cf, ok := f.(*ContinuationFrame) - if !ok { - st.t.Fatalf("got a %T; want *ContinuationFrame", f) - } - return cf -} - -func (st *serverTester) wantData() *DataFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a DATA frame: %v", err) - } - df, ok := f.(*DataFrame) - if !ok { - st.t.Fatalf("got a %T; want *DataFrame", f) - } - return df -} - -func (st *serverTester) wantSettings() *SettingsFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) - } - sf, ok := f.(*SettingsFrame) - if !ok { - st.t.Fatalf("got a %T; want *SettingsFrame", f) - } - return sf -} - -func (st *serverTester) wantPing() *PingFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a PING frame: %v", err) - } - pf, ok := f.(*PingFrame) - if !ok { - st.t.Fatalf("got a 
%T; want *PingFrame", f) - } - return pf -} - -func (st *serverTester) wantGoAway() *GoAwayFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err) - } - gf, ok := f.(*GoAwayFrame) - if !ok { - st.t.Fatalf("got a %T; want *GoAwayFrame", f) - } - return gf -} - -func (st *serverTester) wantRSTStream(streamID uint32, errCode ErrCode) { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting an RSTStream frame: %v", err) - } - rs, ok := f.(*RSTStreamFrame) - if !ok { - st.t.Fatalf("got a %T; want *RSTStreamFrame", f) - } - if rs.FrameHeader.StreamID != streamID { - st.t.Fatalf("RSTStream StreamID = %d; want %d", rs.FrameHeader.StreamID, streamID) - } - if rs.ErrCode != errCode { - st.t.Fatalf("RSTStream ErrCode = %d (%s); want %d (%s)", rs.ErrCode, rs.ErrCode, errCode, errCode) - } -} - -func (st *serverTester) wantWindowUpdate(streamID, incr uint32) { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a WINDOW_UPDATE frame: %v", err) - } - wu, ok := f.(*WindowUpdateFrame) - if !ok { - st.t.Fatalf("got a %T; want *WindowUpdateFrame", f) - } - if wu.FrameHeader.StreamID != streamID { - st.t.Fatalf("WindowUpdate StreamID = %d; want %d", wu.FrameHeader.StreamID, streamID) - } - if wu.Increment != incr { - st.t.Fatalf("WindowUpdate increment = %d; want %d", wu.Increment, incr) - } -} - func (st *serverTester) wantFlowControlConsumed(streamID, consumed int32) { + conf := configFromServer(st.sc.hs, st.sc.srv) var initial int32 if streamID == 0 { - initial = st.sc.srv.initialConnRecvWindowSize() + initial = conf.MaxUploadBufferPerConnection } else { - initial = st.sc.srv.initialStreamRecvWindowSize() + initial = conf.MaxUploadBufferPerStream } donec := make(chan struct{}) st.sc.sendServeMsg(func(sc *serverConn) { @@ -634,32 +614,6 @@ func (st *serverTester) wantFlowControlConsumed(streamID, consumed int32) { <-donec } -func (st *serverTester) wantSettingsAck() { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - sf, ok := f.(*SettingsFrame) - if !ok { - st.t.Fatalf("Wanting a settings ACK, received a %T", f) - } - if !sf.Header().Flags.Has(FlagSettingsAck) { - st.t.Fatal("Settings Frame didn't have ACK set") - } -} - -func (st *serverTester) wantPushPromise() *PushPromiseFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - ppf, ok := f.(*PushPromiseFrame) - if !ok { - st.t.Fatalf("Wanted PushPromise, received %T", ppf) - } - return ppf -} - func TestServer(t *testing.T) { gotReq := make(chan bool, 1) st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { @@ -668,12 +622,6 @@ func TestServer(t *testing.T) { }) defer st.Close() - covers("3.5", ` - The server connection preface consists of a potentially empty - SETTINGS frame ([SETTINGS]) that MUST be the first frame the - server sends in the HTTP/2 connection. 
- `) - st.greet() st.writeHeaders(HeadersFrameParam{ StreamID: 1, // clients send odd numbers @@ -866,7 +814,7 @@ func testBodyContents(t *testing.T, wantContentLength int64, wantBody string, wr if r.ContentLength != wantContentLength { t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) } - all, err := ioutil.ReadAll(r.Body) + all, err := io.ReadAll(r.Body) if err != nil { t.Fatal(err) } @@ -887,7 +835,7 @@ func testBodyContentsFail(t *testing.T, wantContentLength int64, wantReadError s if r.ContentLength != wantContentLength { t.Errorf("ContentLength = %v; want %d", r.ContentLength, wantContentLength) } - all, err := ioutil.ReadAll(r.Body) + all, err := io.ReadAll(r.Body) if err == nil { t.Fatalf("expected an error (%q) reading from the body. Successfully read %q instead.", wantReadError, all) @@ -1095,37 +1043,32 @@ func testRejectRequest(t *testing.T, send func(*serverTester)) { st.wantRSTStream(1, ErrCodeProtocol) } -func testRejectRequestWithProtocolError(t *testing.T, send func(*serverTester)) { +func newServerTesterForError(t *testing.T) *serverTester { + t.Helper() st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { t.Error("server request made it to handler; should've been rejected") }, optQuiet) - defer st.Close() - st.greet() - send(st) - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeProtocol { - t.Errorf("err code = %v; want %v", gf.ErrCode, ErrCodeProtocol) - } + return st } // Section 5.1, on idle connections: "Receiving any frame other than // HEADERS or PRIORITY on a stream in this state MUST be treated as a // connection error (Section 5.4.1) of type PROTOCOL_ERROR." func TestRejectFrameOnIdle_WindowUpdate(t *testing.T) { - testRejectRequestWithProtocolError(t, func(st *serverTester) { - st.fr.WriteWindowUpdate(123, 456) - }) + st := newServerTesterForError(t) + st.fr.WriteWindowUpdate(123, 456) + st.wantGoAway(123, ErrCodeProtocol) } func TestRejectFrameOnIdle_Data(t *testing.T) { - testRejectRequestWithProtocolError(t, func(st *serverTester) { - st.fr.WriteData(123, true, nil) - }) + st := newServerTesterForError(t) + st.fr.WriteData(123, true, nil) + st.wantGoAway(123, ErrCodeProtocol) } func TestRejectFrameOnIdle_RSTStream(t *testing.T) { - testRejectRequestWithProtocolError(t, func(st *serverTester) { - st.fr.WriteRSTStream(123, ErrCodeCancel) - }) + st := newServerTesterForError(t) + st.fr.WriteRSTStream(123, ErrCodeCancel) + st.wantGoAway(123, ErrCodeProtocol) } func TestServer_Request_Connect(t *testing.T) { @@ -1199,7 +1142,7 @@ func TestServer_Ping(t *testing.T) { t.Fatal(err) } - pf := st.wantPing() + pf := readFrame[*PingFrame](t, st) if !pf.Flags.Has(FlagPingAck) { t.Error("response ping doesn't have ACK set") } @@ -1222,38 +1165,36 @@ func (l *filterListener) Accept() (net.Conn, error) { } func TestServer_MaxQueuedControlFrames(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } + // Goroutine debugging makes this test very slow. + disableGoroutineTracking(t) - st := newServerTester(t, nil, func(ts *httptest.Server) { - // TCP buffer sizes on test systems aren't under our control and can be large. - // Create a conn that blocks after 10000 bytes written. 
- ts.Listener = &filterListener{ - Listener: ts.Listener, - accept: func(conn net.Conn) (net.Conn, error) { - return newBlockingWriteConn(conn, 10000), nil - }, - } - }) - defer st.Close() + st := newServerTester(t, nil) st.greet() - const extraPings = 500000 // enough to fill the TCP buffers + st.cc.(*synctestNetConn).SetReadBufferSize(0) // all writes block + st.cc.(*synctestNetConn).autoWait = false // don't sync after every write + // Send maxQueuedControlFrames pings, plus a few extra + // to account for ones that enter the server's write buffer. + const extraPings = 2 for i := 0; i < maxQueuedControlFrames+extraPings; i++ { pingData := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} - if err := st.fr.WritePing(false, pingData); err != nil { - if i == 0 { - t.Fatal(err) - } - // We expect the connection to get closed by the server when the TCP - // buffer fills up and the write queue reaches MaxQueuedControlFrames. - t.Logf("sent %d PING frames", i) - return + st.fr.WritePing(false, pingData) + } + st.group.Wait() + + // Unblock the server. + // It should have closed the connection after exceeding the control frame limit. + st.cc.(*synctestNetConn).SetReadBufferSize(math.MaxInt) + + st.advance(goAwayTimeout) + // Some frames may have persisted in the server's buffers. + for i := 0; i < 10; i++ { + if st.readFrame() == nil { + break } } - t.Errorf("unexpected success sending all PING frames") + st.wantClosed() } func TestServer_RejectsLargeFrames(t *testing.T) { @@ -1269,15 +1210,9 @@ func TestServer_RejectsLargeFrames(t *testing.T) { // will only read the first 9 bytes (the headre) and then disconnect. st.fr.WriteRawFrame(0xff, 0, 0, make([]byte, defaultMaxReadFrameSize+1)) - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeFrameSize { - t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFrameSize) - } - if st.serverLogBuf.Len() != 0 { - // Previously we spun here for a bit until the GOAWAY disconnect - // timer fired, logging while we fired. - t.Errorf("unexpected server output: %.500s\n", st.serverLogBuf.Bytes()) - } + st.wantGoAway(0, ErrCodeFrameSize) + st.advance(goAwayTimeout) + st.wantClosed() } func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { @@ -1303,7 +1238,6 @@ func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { EndStream: false, // data coming EndHeaders: true, }) - st.writeReadPing() // Write less than half the max window of data and consume it. // The server doesn't return flow control yet, buffering the 1024 bytes to @@ -1311,20 +1245,17 @@ func TestServer_Handler_Sends_WindowUpdate(t *testing.T) { data := make([]byte, windowSize) st.writeData(1, false, data[:1024]) puppet.do(readBodyHandler(t, string(data[:1024]))) - st.writeReadPing() // Write up to the window limit. // The server returns the buffered credit. st.writeData(1, false, data[1024:]) st.wantWindowUpdate(0, 1024) st.wantWindowUpdate(1, 1024) - st.writeReadPing() // The handler consumes the data and the server returns credit. puppet.do(readBodyHandler(t, string(data[1024:]))) st.wantWindowUpdate(0, windowSize-1024) st.wantWindowUpdate(1, windowSize-1024) - st.writeReadPing() } // the version of the TestServer_Handler_Sends_WindowUpdate with padding. @@ -1348,7 +1279,6 @@ func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { EndStream: false, EndHeaders: true, }) - st.writeReadPing() // Write half a window of data, with some padding. 
// The server doesn't return the padding yet, buffering the 5 bytes to combine @@ -1356,7 +1286,6 @@ func TestServer_Handler_Sends_WindowUpdate_Padding(t *testing.T) { data := make([]byte, windowSize/2) pad := make([]byte, 4) st.writeDataPadded(1, false, data, pad) - st.writeReadPing() // The handler consumes the body. // The server returns flow control for the body and padding @@ -1373,13 +1302,7 @@ func TestServer_Send_GoAway_After_Bogus_WindowUpdate(t *testing.T) { if err := st.fr.WriteWindowUpdate(0, 1<<31-1); err != nil { t.Fatal(err) } - gf := st.wantGoAway() - if gf.ErrCode != ErrCodeFlowControl { - t.Errorf("GOAWAY err = %v; want %v", gf.ErrCode, ErrCodeFlowControl) - } - if gf.LastStreamID != 0 { - t.Errorf("GOAWAY last stream ID = %v; want %v", gf.LastStreamID, 0) - } + st.wantGoAway(0, ErrCodeFlowControl) } func TestServer_Send_RstStream_After_Bogus_WindowUpdate(t *testing.T) { @@ -1586,10 +1509,10 @@ func TestServer_StateTransitions(t *testing.T) { st.writeData(1, true, nil) leaveHandler <- true - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("expected END_STREAM flag") - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) if got, want := st.streamState(1), stateClosed; got != want { t.Errorf("at end, state is %v; want %v", got, want) @@ -1601,97 +1524,101 @@ func TestServer_StateTransitions(t *testing.T) { // test HEADERS w/o EndHeaders + another HEADERS (should get rejected) func TestServer_Rejects_HeadersNoEnd_Then_Headers(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - st.writeHeaders(HeadersFrameParam{ // Not a continuation. - StreamID: 3, // different stream. - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) + st := newServerTesterForError(t) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, }) + st.writeHeaders(HeadersFrameParam{ // Not a continuation. + StreamID: 3, // different stream. 
+ BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantGoAway(0, ErrCodeProtocol) } // test HEADERS w/o EndHeaders + PING (should get rejected) func TestServer_Rejects_HeadersNoEnd_Then_Ping(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - if err := st.fr.WritePing(false, [8]byte{}); err != nil { - t.Fatal(err) - } + st := newServerTesterForError(t) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, }) + if err := st.fr.WritePing(false, [8]byte{}); err != nil { + t.Fatal(err) + } + st.wantGoAway(0, ErrCodeProtocol) } // test HEADERS w/ EndHeaders + a continuation HEADERS (should get rejected) func TestServer_Rejects_HeadersEnd_Then_Continuation(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) - st.wantHeaders() - if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { - t.Fatal(err) - } + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optQuiet) + st.greet() + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, }) + if err := st.fr.WriteContinuation(1, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + st.wantGoAway(1, ErrCodeProtocol) } // test HEADERS w/o EndHeaders + a continuation HEADERS on wrong stream ID func TestServer_Rejects_HeadersNoEnd_Then_ContinuationWrongStream(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.writeHeaders(HeadersFrameParam{ - StreamID: 1, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: false, - }) - if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { - t.Fatal(err) - } + st := newServerTesterForError(t) + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: false, }) + if err := st.fr.WriteContinuation(3, true, encodeHeaderNoImplicit(t, "foo", "bar")); err != nil { + t.Fatal(err) + } + st.wantGoAway(0, ErrCodeProtocol) } // No HEADERS on stream 0. func TestServer_Rejects_Headers0(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - st.writeHeaders(HeadersFrameParam{ - StreamID: 0, - BlockFragment: st.encodeHeader(), - EndStream: true, - EndHeaders: true, - }) + st := newServerTesterForError(t) + st.fr.AllowIllegalWrites = true + st.writeHeaders(HeadersFrameParam{ + StreamID: 0, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, }) + st.wantGoAway(0, ErrCodeProtocol) } // No CONTINUATION on stream 0. func TestServer_Rejects_Continuation0(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { - t.Fatal(err) - } - }) + st := newServerTesterForError(t) + st.fr.AllowIllegalWrites = true + if err := st.fr.WriteContinuation(0, true, st.encodeHeader()); err != nil { + t.Fatal(err) + } + st.wantGoAway(0, ErrCodeProtocol) } // No PRIORITY on stream 0. 
func TestServer_Rejects_Priority0(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - st.fr.AllowIllegalWrites = true - st.writePriority(0, PriorityParam{StreamDep: 1}) - }) + st := newServerTesterForError(t) + st.fr.AllowIllegalWrites = true + st.writePriority(0, PriorityParam{StreamDep: 1}) + st.wantGoAway(0, ErrCodeProtocol) } // No HEADERS frame with a self-dependence. @@ -1717,36 +1644,15 @@ func TestServer_Rejects_PrioritySelfDependence(t *testing.T) { } func TestServer_Rejects_PushPromise(t *testing.T) { - testServerRejectsConn(t, func(st *serverTester) { - pp := PushPromiseParam{ - StreamID: 1, - PromiseID: 3, - } - if err := st.fr.WritePushPromise(pp); err != nil { - t.Fatal(err) - } - }) -} - -// testServerRejectsConn tests that the server hangs up with a GOAWAY -// frame and a server close after the client does something -// deserving a CONNECTION_ERROR. -func testServerRejectsConn(t *testing.T, writeReq func(*serverTester)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}) - st.addLogFilter("connection error: PROTOCOL_ERROR") - defer st.Close() - st.greet() - writeReq(st) - - st.wantGoAway() - - fr, err := st.fr.ReadFrame() - if err == nil { - t.Errorf("ReadFrame got frame of type %T; want io.EOF", fr) + st := newServerTesterForError(t) + pp := PushPromiseParam{ + StreamID: 1, + PromiseID: 3, } - if err != io.EOF { - t.Errorf("ReadFrame = %v; want io.EOF", err) + if err := st.fr.WritePushPromise(pp); err != nil { + t.Fatal(err) } + st.wantGoAway(1, ErrCodeProtocol) } // testServerRejectsStream tests that the server sends a RST_STREAM with the provided @@ -1786,13 +1692,10 @@ func TestServer_Response_NoData(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("want END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) }) } @@ -1802,22 +1705,15 @@ func TestServer_Response_NoData_Header_FooBar(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Fatal("want END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo-bar", "some-value"}, - {"content-length", "0"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + header: http.Header{ + ":status": []string{"200"}, + "foo-bar": []string{"some-value"}, + "content-length": []string{"0"}, + }, + }) }) } @@ -1862,15 +1758,14 @@ func TestServerIgnoresContentLengthSignWhenWritingChunks(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-length", tt.wantCL}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("For case %q, value %q, got = %q; want %q", tt.name, tt.cl, goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + header: http.Header{ + ":status": []string{"200"}, + "content-length": []string{tt.wantCL}, + }, + }) }) } } @@ -1940,29 +1835,20 @@ func TestServer_Response_Data_Sniff_DoesntOverride(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("don't 
want END_STREAM, expecting data") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "foo/bar"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - df := st.wantData() - if !df.StreamEnded() { - t.Error("expected DATA to have END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"foo/bar"}, + "content-length": []string{strconv.Itoa(len(msg))}, + }, + }) + st.wantData(wantData{ + streamID: 1, + endStream: true, + data: []byte(msg), + }) }) } @@ -1974,16 +1860,15 @@ func TestServer_Response_TransferEncoding_chunked(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/plain; charset=utf-8"}, + "content-length": []string{strconv.Itoa(len(msg))}, + }, + }) }) } @@ -1996,22 +1881,15 @@ func TestServer_Response_Data_IgnoreHeaderAfterWrite_After(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/html; charset=utf-8"}, + "content-length": []string{strconv.Itoa(len(msg))}, + }, + }) }) } @@ -2025,23 +1903,16 @@ func TestServer_Response_Data_IgnoreHeaderAfterWrite_Overwrite(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo", "proper value"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "foo": []string{"proper value"}, + "content-type": []string{"text/html; charset=utf-8"}, + "content-length": []string{strconv.Itoa(len(msg))}, + }, + }) }) } @@ -2052,29 +1923,20 @@ func TestServer_Response_Data_SniffLenType(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("don't want END_STREAM, expecting data") 
- } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", strconv.Itoa(len(msg))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - df := st.wantData() - if !df.StreamEnded() { - t.Error("expected DATA to have END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/html; charset=utf-8"}, + "content-length": []string{strconv.Itoa(len(msg))}, + }, + }) + st.wantData(wantData{ + streamID: 1, + endStream: true, + data: []byte(msg), + }) }) } @@ -2088,40 +1950,25 @@ func TestServer_Response_Header_Flush_MidWrite(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/html; charset=utf-8"}, // sniffed - // and no content-length - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } - { - df := st.wantData() - if df.StreamEnded() { - t.Error("unexpected END_STREAM flag") - } - if got := string(df.Data()); got != msg { - t.Errorf("got DATA %q; want %q", got, msg) - } - } - { - df := st.wantData() - if !df.StreamEnded() { - t.Error("wanted END_STREAM flag on last data chunk") - } - if got := string(df.Data()); got != msg2 { - t.Errorf("got DATA %q; want %q", got, msg2) - } - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/html; charset=utf-8"}, // sniffed + // and no content-length + }, + }) + st.wantData(wantData{ + streamID: 1, + endStream: false, + data: []byte(msg), + }) + st.wantData(wantData{ + streamID: 1, + endStream: true, + data: []byte(msg2), + }) }) } @@ -2157,25 +2004,18 @@ func TestServer_Response_LargeWrite(t *testing.T) { if err := st.fr.WriteWindowUpdate(0, size); err != nil { t.Fatal(err) } - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, // sniffed - // and no content-length - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/plain; charset=utf-8"}, // sniffed + // and no content-length + }, + }) var bytes, frames int for { - df := st.wantData() + df := readFrame[*DataFrame](t, st) bytes += len(df.Data()) frames++ for _, b := range df.Data() { @@ -2226,27 +2066,26 @@ func TestServer_Response_LargeWrite_FlowControlled(t *testing.T) { getSlash(st) // make the single request - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } + st.wantHeaders(wantHeader{ + streamID: 1, + 
endStream: false, + }) - df := st.wantData() - if got := len(df.Data()); got != reads[0] { - t.Fatalf("Initial window size = %d but got DATA with %d bytes", reads[0], got) - } + st.wantData(wantData{ + streamID: 1, + endStream: false, + size: reads[0], + }) - for _, quota := range reads[1:] { + for i, quota := range reads[1:] { if err := st.fr.WriteWindowUpdate(1, uint32(quota)); err != nil { t.Fatal(err) } - df := st.wantData() - if int(quota) != len(df.Data()) { - t.Fatalf("read %d bytes after giving %d quota", len(df.Data()), quota) - } + st.wantData(wantData{ + streamID: 1, + endStream: i == len(reads[1:])-1, + size: quota, + }) } }) } @@ -2273,13 +2112,10 @@ func TestServer_Response_RST_Unblocks_LargeWrite(t *testing.T) { getSlash(st) // make the single request - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + }) if err := st.fr.WriteRSTStream(1, ErrCodeCancel); err != nil { t.Fatal(err) @@ -2301,21 +2137,16 @@ func TestServer_Response_Empty_Data_Not_FlowControlled(t *testing.T) { getSlash(st) // make the single request - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + }) - df := st.wantData() - if got := len(df.Data()); got != 0 { - t.Fatalf("unexpected %d DATA bytes; want 0", got) - } - if !df.StreamEnded() { - t.Fatal("DATA didn't have END_STREAM") - } + st.wantData(wantData{ + streamID: 1, + endStream: true, + size: 0, + }) }) } @@ -2340,49 +2171,33 @@ func TestServer_Response_Automatic100Continue(t *testing.T) { EndStream: false, EndHeaders: true, }) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "100"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Fatalf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"100"}, + }, + }) // Okay, they sent status 100, so we can send our // gigantic and/or sensitive "foo" payload now. 
st.writeData(1, true, []byte(msg)) - hf = st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("expected data to follow") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - goth = st.decodeHeader(hf.HeaderBlockFragment()) - wanth = [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", strconv.Itoa(len(reply))}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/plain; charset=utf-8"}, + "content-length": []string{strconv.Itoa(len(reply))}, + }, + }) - df := st.wantData() - if string(df.Data()) != reply { - t.Errorf("Client read %q; want %q", df.Data(), reply) - } - if !df.StreamEnded() { - t.Errorf("expect data stream end") - } + st.wantData(wantData{ + streamID: 1, + endStream: true, + data: []byte(reply), + }) }) } @@ -2404,13 +2219,10 @@ func TestServer_HandlerWriteErrorOnDisconnect(t *testing.T) { EndStream: false, EndHeaders: true, }) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("unexpected END_STREAM flag") - } - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + }) // Close the connection and wait for the handler to (hopefully) notice. st.cc.Close() _ = <-errc @@ -2431,6 +2243,11 @@ func TestServer_Rejects_Too_Many_Streams(t *testing.T) { <-leaveHandler }) defer st.Close() + + // Automatically syncing after every write / before every read + // slows this test down substantially. + st.cc.(*synctestNetConn).autoWait = false + st.greet() nextStreamID := uint32(1) streamID := func() uint32 { @@ -2470,11 +2287,16 @@ func TestServer_Rejects_Too_Many_Streams(t *testing.T) { if err := st.fr.WriteContinuation(rejectID, true, frag2); err != nil { t.Fatal(err) } + st.sync() st.wantRSTStream(rejectID, ErrCodeProtocol) // But let a handler finish: leaveHandler <- true - st.wantHeaders() + st.sync() + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) // And now another stream should be able to start: goodID := streamID() @@ -2494,14 +2316,14 @@ func TestServer_Response_ManyHeaders_With_Continuation(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() + hf := readFrame[*HeadersFrame](t, st) if hf.HeadersEnded() { t.Fatal("got unwanted END_HEADERS flag") } n := 0 for { n++ - cf := st.wantContinuation() + cf := readFrame[*ContinuationFrame](t, st) if cf.HeadersEnded() { break } @@ -2530,10 +2352,10 @@ func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { EndStream: false, // DATA is coming EndHeaders: true, }) - hf := st.wantHeaders() - if !hf.HeadersEnded() || !hf.StreamEnded() { - t.Fatalf("want END_HEADERS+END_STREAM, got %v", hf) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) // Sent when the a Handler closes while a client has // indicated it's still sending DATA: @@ -2588,79 +2410,51 @@ func TestServer_NoCrash_HandlerClose_Then_ClientClose(t *testing.T) { func TestServer_Rejects_TLS10(t *testing.T) { testRejectTLS(t, tls.VersionTLS10) } func TestServer_Rejects_TLS11(t *testing.T) { testRejectTLS(t, tls.VersionTLS11) } -func testRejectTLS(t *testing.T, max uint16) { - st := newServerTester(t, nil, func(c *tls.Config) { +func testRejectTLS(t *testing.T, version uint16) { + st := newServerTester(t, nil, func(state *tls.ConnectionState) { // As of 
1.18 the default minimum Go TLS version is // 1.2. In order to test rejection of lower versions, - // manually set the minimum version to 1.0 - c.MinVersion = tls.VersionTLS10 - c.MaxVersion = max + // manually set the version to 1.0 + state.Version = version }) defer st.Close() - gf := st.wantGoAway() - if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { - t.Errorf("Got error code %v; want %v", got, want) - } + st.wantGoAway(0, ErrCodeInadequateSecurity) } func TestServer_Rejects_TLSBadCipher(t *testing.T) { - st := newServerTester(t, nil, func(c *tls.Config) { - // All TLS 1.3 ciphers are good. Test with TLS 1.2. - c.MaxVersion = tls.VersionTLS12 - // Only list bad ones: - c.CipherSuites = []uint16{ - tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, - } + st := newServerTester(t, nil, func(state *tls.ConnectionState) { + state.Version = tls.VersionTLS12 + state.CipherSuite = tls.TLS_RSA_WITH_RC4_128_SHA }) defer st.Close() - gf := st.wantGoAway() - if got, want := gf.ErrCode, ErrCodeInadequateSecurity; got != want { - t.Errorf("Got error code %v; want %v", got, want) - } + st.wantGoAway(0, ErrCodeInadequateSecurity) } func TestServer_Advertises_Common_Cipher(t *testing.T) { - const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - st := newServerTester(t, nil, func(c *tls.Config) { - // Have the client only support the one required by the spec. - c.CipherSuites = []uint16{requiredSuite} - }, func(ts *httptest.Server) { - var srv *http.Server = ts.Config + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + }, func(srv *http.Server) { // Have the server configured with no specific cipher suites. // This tests that Go's defaults include the required one. srv.TLSConfig = nil }) - defer st.Close() - st.greet() -} -func (st *serverTester) onHeaderField(f hpack.HeaderField) { - if f.Name == "date" { - return - } - st.decodedHeaders = append(st.decodedHeaders, [2]string{f.Name, f.Value}) -} + // Have the client only support the one required by the spec. + const requiredSuite = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + tlsConfig := tlsConfigInsecure.Clone() + tlsConfig.MaxVersion = tls.VersionTLS12 + tlsConfig.CipherSuites = []uint16{requiredSuite} + tr := &Transport{TLSClientConfig: tlsConfig} + defer tr.CloseIdleConnections() -func (st *serverTester) decodeHeader(headerBlock []byte) (pairs [][2]string) { - st.decodedHeaders = nil - if _, err := st.hpackDec.Write(headerBlock); err != nil { - st.t.Fatalf("hpack decoding error: %v", err) + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) } - if err := st.hpackDec.Close(); err != nil { - st.t.Fatalf("hpack decoding error: %v", err) + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) } - return st.decodedHeaders + res.Body.Close() } // testServerResponse sets up an idle HTTP/2 connection. 
The client function should @@ -2804,19 +2598,15 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) { st.fr.WriteContinuation(1, len(b) == 0, chunk) } - h := st.wantHeaders() - if !h.HeadersEnded() { - t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) - } - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "431"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", "63"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"431"}, + "content-type": []string{"text/html; charset=utf-8"}, + "content-length": []string{"63"}, + }, + }) } func TestServer_Response_Stream_With_Missing_Trailer(t *testing.T) { @@ -2825,17 +2615,15 @@ func TestServer_Response_Stream_With_Missing_Trailer(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if !hf.HeadersEnded() { - t.Fatal("want END_HEADERS flag") - } - df := st.wantData() - if len(df.data) != 0 { - t.Fatal("did not want data") - } - if !df.StreamEnded() { - t.Fatal("want END_STREAM flag") - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + }) + st.wantData(wantData{ + streamID: 1, + endStream: true, + size: 0, + }) }) } @@ -2844,8 +2632,8 @@ func TestCompressionErrorOnWrite(t *testing.T) { var serverConfig *http.Server st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { // No response body. - }, func(ts *httptest.Server) { - serverConfig = ts.Config + }, func(s *http.Server) { + serverConfig = s serverConfig.MaxHeaderBytes = maxStrLen }) st.addLogFilter("connection error: COMPRESSION_ERROR") @@ -2873,20 +2661,16 @@ func TestCompressionErrorOnWrite(t *testing.T) { EndStream: true, EndHeaders: true, }) - h := st.wantHeaders() - if !h.HeadersEnded() { - t.Fatalf("Got HEADERS without END_HEADERS set: %v", h) - } - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "431"}, - {"content-type", "text/html; charset=utf-8"}, - {"content-length", "63"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } - df := st.wantData() + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"431"}, + "content-type": []string{"text/html; charset=utf-8"}, + "content-length": []string{"63"}, + }, + }) + df := readFrame[*DataFrame](t, st) if !strings.Contains(string(df.Data()), "HTTP Error 431") { t.Errorf("Unexpected data body: %q", df.Data()) } @@ -2902,10 +2686,7 @@ func TestCompressionErrorOnWrite(t *testing.T) { EndStream: true, EndHeaders: true, }) - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeCompression { - t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) - } + st.wantGoAway(3, ErrCodeCompression) } func TestCompressionErrorOnClose(t *testing.T) { @@ -2924,10 +2705,7 @@ func TestCompressionErrorOnClose(t *testing.T) { EndStream: true, EndHeaders: true, }) - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeCompression { - t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode) - } + st.wantGoAway(1, ErrCodeCompression) } // test that a server handler can read trailers from a client @@ -2962,7 +2740,7 @@ func TestServerReadsTrailers(t *testing.T) { if !reflect.DeepEqual(r.Trailer, wantTrailer) { t.Errorf("initial Trailer = %v; want %v", r.Trailer, wantTrailer) } - slurp, err 
:= ioutil.ReadAll(r.Body) + slurp, err := io.ReadAll(r.Body) if string(slurp) != testBody { t.Errorf("read body %q; want %q", slurp, testBody) } @@ -3016,67 +2794,52 @@ func testServerWritesTrailers(t *testing.T, withFlush bool) { w.Header().Set("Trailer", "should not be included; Forbidden by RFC 7230 4.1.2") return nil }, func(st *serverTester) { - getSlash(st) - hf := st.wantHeaders() - if hf.StreamEnded() { - t.Fatal("response HEADERS had END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("response HEADERS didn't have END_HEADERS") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"foo", "Bar"}, - {"trailer", "Server-Trailer-A, Server-Trailer-B"}, - {"trailer", "Server-Trailer-C"}, - {"trailer", "Transfer-Encoding, Content-Length, Trailer"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", "5"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) - } - df := st.wantData() - if string(df.Data()) != "Hello" { - t.Fatalf("Client read %q; want Hello", df.Data()) - } - if df.StreamEnded() { - t.Fatalf("data frame had STREAM_ENDED") - } - tf := st.wantHeaders() // for the trailers - if !tf.StreamEnded() { - t.Fatalf("trailers HEADERS lacked END_STREAM") - } - if !tf.HeadersEnded() { - t.Fatalf("trailers HEADERS lacked END_HEADERS") - } - wanth = [][2]string{ - {"post-header-trailer", "hi1"}, - {"post-header-trailer2", "hi2"}, - {"server-trailer-a", "valuea"}, - {"server-trailer-c", "valuec"}, - } - goth = st.decodeHeader(tf.HeaderBlockFragment()) - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Header mismatch.\n got: %v\nwant: %v", goth, wanth) - } + getSlash(st) + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "foo": []string{"Bar"}, + "trailer": []string{ + "Server-Trailer-A, Server-Trailer-B", + "Server-Trailer-C", + "Transfer-Encoding, Content-Length, Trailer", + }, + "content-type": []string{"text/plain; charset=utf-8"}, + "content-length": []string{"5"}, + }, + }) + st.wantData(wantData{ + streamID: 1, + endStream: false, + data: []byte("Hello"), + }) + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + header: http.Header{ + "post-header-trailer": []string{"hi1"}, + "post-header-trailer2": []string{"hi2"}, + "server-trailer-a": []string{"valuea"}, + "server-trailer-c": []string{"valuec"}, + }, + }) }) } func TestServerWritesUndeclaredTrailers(t *testing.T) { const trailer = "Trailer-Header" const value = "hi1" - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.Header().Set(http.TrailerPrefix+trailer, value) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() cl := &http.Client{Transport: tr} - resp, err := cl.Get(st.ts.URL) + resp, err := cl.Get(ts.URL) if err != nil { t.Fatal(err) } @@ -3099,31 +2862,24 @@ func TestServerDoesntWriteInvalidHeaders(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - if !hf.StreamEnded() { - t.Error("response HEADERS lacked END_STREAM") - } - if !hf.HeadersEnded() { - t.Fatal("response HEADERS didn't have END_HEADERS") - } - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"ok1", "x"}, - {"content-length", "0"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Header 
mismatch.\n got: %v\nwant: %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + header: http.Header{ + ":status": []string{"200"}, + "ok1": []string{"x"}, + "content-length": []string{"0"}, + }, + }) }) } func BenchmarkServerGets(b *testing.B) { - defer disableGoroutineTracking()() + disableGoroutineTracking(b) b.ReportAllocs() const msg = "Hello, world" - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, msg) }) defer st.Close() @@ -3142,24 +2898,23 @@ func BenchmarkServerGets(b *testing.B) { EndStream: true, EndHeaders: true, }) - st.wantHeaders() - df := st.wantData() - if !df.StreamEnded() { + st.wantFrameType(FrameHeaders) + if df := readFrame[*DataFrame](b, st); !df.StreamEnded() { b.Fatalf("DATA didn't have END_STREAM; got %v", df) } } } func BenchmarkServerPosts(b *testing.B) { - defer disableGoroutineTracking()() + disableGoroutineTracking(b) b.ReportAllocs() const msg = "Hello, world" - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) { // Consume the (empty) body from th peer before replying, otherwise // the server will sometimes (depending on scheduling) send the peer a // a RST_STREAM with the CANCEL error code. - if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + if n, err := io.Copy(io.Discard, r.Body); n != 0 || err != nil { b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) } io.WriteString(w, msg) @@ -3181,9 +2936,8 @@ func BenchmarkServerPosts(b *testing.B) { EndHeaders: true, }) st.writeData(id, true, nil) - st.wantHeaders() - df := st.wantData() - if !df.StreamEnded() { + st.wantFrameType(FrameHeaders) + if df := readFrame[*DataFrame](b, st); !df.StreamEnded() { b.Fatalf("DATA didn't have END_STREAM; got %v", df) } } @@ -3203,7 +2957,7 @@ func BenchmarkServerToClientStreamReuseFrames(b *testing.B) { } func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { - defer disableGoroutineTracking()() + disableGoroutineTracking(b) b.ReportAllocs() const msgLen = 1 // default window size @@ -3219,11 +2973,11 @@ func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { return msg } - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { + st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) { // Consume the (empty) body from th peer before replying, otherwise // the server will sometimes (depending on scheduling) send the peer a // a RST_STREAM with the CANCEL error code. 
- if n, err := io.Copy(ioutil.Discard, r.Body); n != 0 || err != nil { + if n, err := io.Copy(io.Discard, r.Body); n != 0 || err != nil { b.Errorf("Copy error; got %v, %v; want 0, nil", n, err) } for i := 0; i < b.N; i += 1 { @@ -3244,18 +2998,22 @@ func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { }) st.writeData(id, true, nil) - st.wantHeaders() + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + }) var pendingWindowUpdate = uint32(0) for i := 0; i < b.N; i += 1 { expected := nextMsg(i) - df := st.wantData() - if bytes.Compare(expected, df.data) != 0 { - b.Fatalf("Bad message received; want %v; got %v", expected, df.data) - } + st.wantData(wantData{ + streamID: 1, + endStream: false, + data: expected, + }) // try to send infrequent but large window updates so they don't overwhelm the test - pendingWindowUpdate += uint32(len(df.data)) + pendingWindowUpdate += uint32(len(expected)) if pendingWindowUpdate >= windowSize/2 { if err := st.fr.WriteWindowUpdate(0, pendingWindowUpdate); err != nil { b.Fatal(err) @@ -3266,10 +3024,10 @@ func benchmarkServerToClientStream(b *testing.B, newServerOpts ...interface{}) { pendingWindowUpdate = 0 } } - df := st.wantData() - if !df.StreamEnded() { - b.Fatalf("DATA didn't have END_STREAM; got %v", df) - } + st.wantData(wantData{ + streamID: 1, + endStream: true, + }) } // go-fuzz bug, originally reported at https://github.com/bradfitz/http2/issues/53 @@ -3433,14 +3191,13 @@ func TestServerNoAutoContentLengthOnHead(t *testing.T) { EndStream: true, EndHeaders: true, }) - h := st.wantHeaders() - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "200"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + header: http.Header{ + ":status": []string{"200"}, + }, + }) } // golang.org/issue/13495 @@ -3457,16 +3214,15 @@ func TestServerNoDuplicateContentType(t *testing.T) { EndStream: true, EndHeaders: true, }) - h := st.wantHeaders() - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "200"}, - {"content-type", ""}, - {"content-length", "41"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{""}, + "content-length": []string{"41"}, + }, + }) } func TestServerContentLengthCanBeDisabled(t *testing.T) { @@ -3482,29 +3238,28 @@ func TestServerContentLengthCanBeDisabled(t *testing.T) { EndStream: true, EndHeaders: true, }) - h := st.wantHeaders() - headers := st.decodeHeader(h.HeaderBlockFragment()) - want := [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - } - if !reflect.DeepEqual(headers, want) { - t.Errorf("Headers mismatch.\n got: %q\nwant: %q\n", headers, want) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/plain; charset=utf-8"}, + }, + }) } -func disableGoroutineTracking() (restore func()) { +func disableGoroutineTracking(t testing.TB) { old := DebugGoroutines DebugGoroutines = false - return func() { DebugGoroutines = old } + t.Cleanup(func() { DebugGoroutines = old }) } func BenchmarkServer_GetRequest(b *testing.B) { - defer disableGoroutineTracking()() + 
disableGoroutineTracking(b) b.ReportAllocs() const msg = "Hello, world." - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - n, err := io.Copy(ioutil.Discard, r.Body) + st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(io.Discard, r.Body) if err != nil || n > 0 { b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) } @@ -3526,17 +3281,17 @@ func BenchmarkServer_GetRequest(b *testing.B) { EndStream: true, EndHeaders: true, }) - st.wantHeaders() - st.wantData() + st.wantFrameType(FrameHeaders) + st.wantFrameType(FrameData) } } func BenchmarkServer_PostRequest(b *testing.B) { - defer disableGoroutineTracking()() + disableGoroutineTracking(b) b.ReportAllocs() const msg = "Hello, world." - st := newServerTester(b, func(w http.ResponseWriter, r *http.Request) { - n, err := io.Copy(ioutil.Discard, r.Body) + st := newServerTesterWithRealConn(b, func(w http.ResponseWriter, r *http.Request) { + n, err := io.Copy(io.Discard, r.Body) if err != nil || n > 0 { b.Errorf("Read %d bytes, error %v; want 0 bytes.", n, err) } @@ -3558,8 +3313,8 @@ func BenchmarkServer_PostRequest(b *testing.B) { EndHeaders: true, }) st.writeData(streamID, true, nil) - st.wantHeaders() - st.wantData() + st.wantFrameType(FrameHeaders) + st.wantFrameType(FrameData) } } @@ -3610,7 +3365,7 @@ func TestServerHandleCustomConn(t *testing.T) { EndStream: true, EndHeaders: true, }) - go io.Copy(ioutil.Discard, c2) + go io.Copy(io.Discard, c2) <-handlerDone }() const testString = "my custom ConnectionState" @@ -3644,17 +3399,16 @@ func TestServer_Rejects_ConnHeaders(t *testing.T) { defer st.Close() st.greet() st.bodylessReq1("connection", "foo") - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "400"}, - {"content-type", "text/plain; charset=utf-8"}, - {"x-content-type-options", "nosniff"}, - {"content-length", "51"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"400"}, + "content-type": []string{"text/plain; charset=utf-8"}, + "x-content-type-options": []string{"nosniff"}, + "content-length": []string{"51"}, + }, + }) } type hpackEncoder struct { @@ -3731,7 +3485,7 @@ func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { doRead := make(chan bool, 1) defer close(doRead) // fallback cleanup - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, msg) w.(http.Flusher).Flush() @@ -3740,14 +3494,12 @@ func TestExpect100ContinueAfterHandlerWrites(t *testing.T) { r.Body.Read(make([]byte, 10)) io.WriteString(w, msg2) - - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, _ := http.NewRequest("POST", st.ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) + req, _ := http.NewRequest("POST", ts.URL, io.LimitReader(neverEnding('A'), 2<<20)) req.Header.Set("Expect", "100-continue") res, err := tr.RoundTrip(req) @@ -3808,14 +3560,13 @@ func TestUnreadFlowControlReturned_Server(t *testing.T) { unblock := make(chan bool, 1) defer close(unblock) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { // Don't read the 16KB request body. 
Wait until the client's // done sending it and then return. This should cause the Server // to then return those 16KB of flow control to the client. tt.reqFn(r) <-unblock - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -3833,7 +3584,7 @@ func TestUnreadFlowControlReturned_Server(t *testing.T) { return 0, io.EOF }), ) - req, _ := http.NewRequest("POST", st.ts.URL, body) + req, _ := http.NewRequest("POST", ts.URL, body) res, err := tr.RoundTrip(req) if err != nil { t.Fatal(tt.name, err) @@ -3862,12 +3613,18 @@ func TestServerReturnsStreamAndConnFlowControlOnBodyClose(t *testing.T) { BlockFragment: st.encodeHeader(), EndHeaders: true, }) - st.wantHeaders() + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + }) const size = inflowMinRefresh // enough to trigger flow control return st.writeData(1, false, make([]byte, size)) st.wantWindowUpdate(0, size) // conn-level flow control is returned unblockHandler <- struct{}{} - st.wantData() + st.wantData(wantData{ + streamID: 1, + endStream: true, + }) } func TestServerIdleTimeout(t *testing.T) { @@ -3882,22 +3639,24 @@ func TestServerIdleTimeout(t *testing.T) { defer st.Close() st.greet() - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeNo { - t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) - } + st.advance(500 * time.Millisecond) + st.wantGoAway(0, ErrCodeNo) } func TestServerIdleTimeout_AfterRequest(t *testing.T) { if testing.Short() { t.Skip("skipping in short mode") } - const timeout = 250 * time.Millisecond + const ( + requestTimeout = 2 * time.Second + idleTimeout = 1 * time.Second + ) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - time.Sleep(timeout * 2) + var st *serverTester + st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + st.group.Sleep(requestTimeout) }, func(h2s *Server) { - h2s.IdleTimeout = timeout + h2s.IdleTimeout = idleTimeout }) defer st.Close() @@ -3906,14 +3665,16 @@ func TestServerIdleTimeout_AfterRequest(t *testing.T) { // Send a request which takes twice the timeout. Verifies the // idle timeout doesn't fire while we're in a request: st.bodylessReq1() - st.wantHeaders() + st.advance(requestTimeout) + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) // But the idle timeout should be rearmed after the request // is done: - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeNo { - t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) - } + st.advance(idleTimeout) + st.wantGoAway(1, ErrCodeNo) } // grpc-go closes the Request.Body currently with a Read. 
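The rewritten timeout tests above (TestServerIdleTimeout, TestServerIdleTimeout_AfterRequest) no longer sleep on the real clock; they call st.advance to move a synthetic clock forward and let the server's timers fire deterministically. The following is a minimal, self-contained sketch of that pattern under assumed names: fakeClock, fakeTimer, AfterFunc, and Advance are illustrative only, not the package's actual types, and merely mirror the spirit of the synctestGroup.AdvanceTime logic that appears in sync_test.go later in this diff.

package main

import (
	"fmt"
	"time"
)

// fakeTimer is an illustrative timer owned by fakeClock (hypothetical names).
type fakeTimer struct {
	when time.Time
	fn   func()
}

// fakeClock is a minimal synthetic clock: callbacks are registered for a
// future instant and run only when the test advances time explicitly.
type fakeClock struct {
	now    time.Time
	timers []*fakeTimer
}

// AfterFunc schedules fn to run once the synthetic clock reaches now+d.
func (c *fakeClock) AfterFunc(d time.Duration, fn func()) {
	c.timers = append(c.timers, &fakeTimer{when: c.now.Add(d), fn: fn})
}

// Advance moves the clock forward by d and runs every timer whose
// deadline has been reached; timers that are not yet due are kept.
func (c *fakeClock) Advance(d time.Duration) {
	c.now = c.now.Add(d)
	kept := c.timers[:0]
	for _, tm := range c.timers {
		if tm.when.After(c.now) {
			kept = append(kept, tm)
			continue
		}
		tm.fn()
	}
	c.timers = kept
}

func main() {
	c := &fakeClock{now: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)}
	c.AfterFunc(10*time.Second, func() { fmt.Println("idle timeout fired") })
	c.Advance(9 * time.Second) // not due yet, nothing fires
	c.Advance(1 * time.Second) // deadline reached, prints "idle timeout fired"
}

Splitting the advance into two steps is what lets a test assert both that nothing happens just before the deadline and that the timeout (for example, a GOAWAY on idle) fires exactly when it elapses.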
@@ -3949,22 +3710,21 @@ func TestIssue20704Race(t *testing.T) { itemCount = 100 ) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { for i := 0; i < itemCount; i++ { _, err := w.Write(make([]byte, itemSize)) if err != nil { return } } - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() cl := &http.Client{Transport: tr} for i := 0; i < 1000; i++ { - resp, err := cl.Get(st.ts.URL) + resp, err := cl.Get(ts.URL) if err != nil { t.Fatal(err) } @@ -3976,7 +3736,7 @@ func TestIssue20704Race(t *testing.T) { func TestServer_Rejects_TooSmall(t *testing.T) { testServerResponse(t, func(w http.ResponseWriter, r *http.Request) error { - ioutil.ReadAll(r.Body) + io.ReadAll(r.Body) return nil }, func(st *serverTester) { st.writeHeaders(HeadersFrameParam{ @@ -4016,13 +3776,10 @@ func TestServerHandlerConnectionClose(t *testing.T) { var sawRes bool var sawWindowUpdate bool for { - f, err := st.readFrame() - if err == io.EOF { + f := st.readFrame() + if f == nil { break } - if err != nil { - t.Fatal(err) - } switch f := f.(type) { case *GoAwayFrame: sawGoAway = true @@ -4074,6 +3831,8 @@ func TestServerHandlerConnectionClose(t *testing.T) { } sawWindowUpdate = true unblockHandler <- true + st.sync() + st.advance(goAwayTimeout) default: t.Logf("unexpected frame: %v", summarizeFrame(f)) } @@ -4139,20 +3898,9 @@ func TestServer_Headers_HalfCloseRemote(t *testing.T) { } func TestServerGracefulShutdown(t *testing.T) { - var st *serverTester handlerDone := make(chan struct{}) - st = newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - defer close(handlerDone) - go st.ts.Config.Shutdown(context.Background()) - - ga := st.wantGoAway() - if ga.ErrCode != ErrCodeNo { - t.Errorf("GOAWAY error = %v; want ErrCodeNo", ga.ErrCode) - } - if ga.LastStreamID != 1 { - t.Errorf("GOAWAY LastStreamID = %v; want 1", ga.LastStreamID) - } - + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + <-handlerDone w.Header().Set("x-foo", "bar") }) defer st.Close() @@ -4160,17 +3908,23 @@ func TestServerGracefulShutdown(t *testing.T) { st.greet() st.bodylessReq1() - <-handlerDone - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "200"}, - {"x-foo", "bar"}, - {"content-length", "0"}, - } - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got headers %v; want %v", goth, wanth) - } + st.sync() + st.h1server.Shutdown(context.Background()) + + st.wantGoAway(1, ErrCodeNo) + + close(handlerDone) + st.sync() + + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + header: http.Header{ + ":status": []string{"200"}, + "x-foo": []string{"bar"}, + "content-length": []string{"0"}, + }, + }) n, err := st.cc.Read([]byte{0}) if n != 0 || err == nil { @@ -4241,26 +3995,25 @@ func TestContentEncodingNoSniffing(t *testing.T) { for _, tt := range resps { t.Run(tt.name, func(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { if tt.contentEncoding != nil { w.Header().Set("Content-Encoding", tt.contentEncoding.(string)) } w.Write(tt.body) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) res, err := 
tr.RoundTrip(req) if err != nil { - t.Fatalf("GET %s: %v", st.ts.URL, err) + t.Fatalf("GET %s: %v", ts.URL, err) } defer res.Body.Close() g := res.Header.Get("Content-Encoding") - t.Logf("%s: Content-Encoding: %s", st.ts.URL, g) + t.Logf("%s: Content-Encoding: %s", ts.URL, g) if w := tt.contentEncoding; g != w { if w != nil { // The case where contentEncoding was set explicitly. @@ -4274,7 +4027,7 @@ func TestContentEncodingNoSniffing(t *testing.T) { if w := tt.wantContentType; g != w { t.Errorf("Content-Type mismatch\n\tgot: %q\n\twant: %q", g, w) } - t.Logf("%s: Content-Type: %s", st.ts.URL, g) + t.Logf("%s: Content-Type: %s", ts.URL, g) }) } } @@ -4322,13 +4075,10 @@ func TestServerWindowUpdateOnBodyClose(t *testing.T) { // Wait for flow control credit for the portion of the request written so far. increments := windowSize / 2 for { - f, err := st.readFrame() - if err == io.EOF { + f := st.readFrame() + if f == nil { break } - if err != nil { - t.Fatal(err) - } if wu, ok := f.(*WindowUpdateFrame); ok && wu.StreamID == 0 { increments -= int(wu.Increment) if increments == 0 { @@ -4362,24 +4112,16 @@ func TestNoErrorLoggedOnPostAfterGOAWAY(t *testing.T) { EndStream: false, EndHeaders: true, }) - st.wantHeaders() + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) st.sc.startGracefulShutdown() - for { - f, err := st.readFrame() - if err == io.EOF { - st.t.Fatal("got a EOF; want *GoAwayFrame") - } - if err != nil { - t.Fatal(err) - } - if gf, ok := f.(*GoAwayFrame); ok && gf.StreamID == 0 { - break - } - } + st.wantRSTStream(1, ErrCodeNo) + st.wantGoAway(1, ErrCodeNo) st.writeData(1, true, []byte(content)) - time.Sleep(200 * time.Millisecond) st.Close() if bytes.Contains(st.serverLogBuf.Bytes(), []byte("PROTOCOL_ERROR")) { @@ -4395,27 +4137,22 @@ func TestServerSendsProcessing(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "102"}, - } - - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got = %q; want %q", goth, wanth) - } - - hf = st.wantHeaders() - goth = st.decodeHeader(hf.HeaderBlockFragment()) - wanth = [][2]string{ - {":status", "200"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", "5"}, - } - - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got = %q; want %q", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"102"}, + }, + }) + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "content-type": []string{"text/plain; charset=utf-8"}, + "content-length": []string{"5"}, + }, + }) }) } @@ -4435,45 +4172,43 @@ func TestServerSendsEarlyHints(t *testing.T) { return nil }, func(st *serverTester) { getSlash(st) - hf := st.wantHeaders() - goth := st.decodeHeader(hf.HeaderBlockFragment()) - wanth := [][2]string{ - {":status", "103"}, - {"link", "; rel=preload; as=style"}, - {"link", "; rel=preload; as=script"}, - } - - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got = %q; want %q", goth, wanth) - } - - hf = st.wantHeaders() - goth = st.decodeHeader(hf.HeaderBlockFragment()) - wanth = [][2]string{ - {":status", "103"}, - {"link", "; rel=preload; as=style"}, - {"link", "; rel=preload; as=script"}, - {"link", "; rel=preload; as=script"}, - } - - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got = %q; want %q", goth, wanth) - } - - hf = st.wantHeaders() - goth = 
st.decodeHeader(hf.HeaderBlockFragment()) - wanth = [][2]string{ - {":status", "200"}, - {"link", "; rel=preload; as=style"}, - {"link", "; rel=preload; as=script"}, - {"link", "; rel=preload; as=script"}, - {"content-type", "text/plain; charset=utf-8"}, - {"content-length", "123"}, - } - - if !reflect.DeepEqual(goth, wanth) { - t.Errorf("Got = %q; want %q", goth, wanth) - } + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"103"}, + "link": []string{ + "; rel=preload; as=style", + "; rel=preload; as=script", + }, + }, + }) + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"103"}, + "link": []string{ + "; rel=preload; as=style", + "; rel=preload; as=script", + "; rel=preload; as=script", + }, + }, + }) + st.wantHeaders(wantHeader{ + streamID: 1, + endStream: false, + header: http.Header{ + ":status": []string{"200"}, + "link": []string{ + "; rel=preload; as=style", + "; rel=preload; as=script", + "; rel=preload; as=script", + }, + "content-type": []string{"text/plain; charset=utf-8"}, + "content-length": []string{"123"}, + }, + }) }) } @@ -4495,7 +4230,6 @@ func TestProtocolErrorAfterGoAway(t *testing.T) { EndHeaders: true, }) st.writeData(1, false, []byte(content[:5])) - st.writeReadPing() // Send a GOAWAY with ErrCodeNo, followed by a bogus window update. // The server should close the connection. @@ -4506,14 +4240,9 @@ func TestProtocolErrorAfterGoAway(t *testing.T) { t.Fatal(err) } - for { - if _, err := st.readFrame(); err != nil { - if err != io.EOF { - t.Errorf("unexpected readFrame error: %v", err) - } - break - } - } + st.advance(goAwayTimeout) + st.wantGoAway(1, ErrCodeNo) + st.wantClosed() } func TestServerInitialFlowControlWindow(t *testing.T) { @@ -4534,9 +4263,9 @@ func TestServerInitialFlowControlWindow(t *testing.T) { }, func(s *Server) { s.MaxUploadBufferPerConnection = want }) - defer st.Close() st.writePreface() - st.writeInitialSettings() + st.writeSettings() + _ = readFrame[*SettingsFrame](t, st) st.writeSettingsAck() st.writeHeaders(HeadersFrameParam{ StreamID: 1, @@ -4547,10 +4276,7 @@ func TestServerInitialFlowControlWindow(t *testing.T) { window := 65535 Frames: for { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } + f := st.readFrame() switch f := f.(type) { case *WindowUpdateFrame: if f.FrameHeader.StreamID != 0 { @@ -4560,6 +4286,8 @@ func TestServerInitialFlowControlWindow(t *testing.T) { window += int(f.Increment) case *HeadersFrame: break Frames + case nil: + break Frames default: } } @@ -4578,13 +4306,16 @@ func TestCanonicalHeaderCacheGrowth(t *testing.T) { sc := &serverConn{ serveG: newGoroutineLock(), } - const count = 1000 - for i := 0; i < count; i++ { - h := fmt.Sprintf("%v-%v", base, i) + count := 0 + added := 0 + for added < 10*maxCachedCanonicalHeadersKeysSize { + h := fmt.Sprintf("%v-%v", base, count) c := sc.canonicalHeader(h) if len(h) != len(c) { t.Errorf("sc.canonicalHeader(%q) = %q, want same length", h, c) } + count++ + added += len(h) } total := 0 for k, v := range sc.canonHeader { @@ -4603,7 +4334,7 @@ func TestCanonicalHeaderCacheGrowth(t *testing.T) { // We should not access the slice after this point. 
func TestServerWriteDoesNotRetainBufferAfterReturn(t *testing.T) { donec := make(chan struct{}) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { defer close(donec) buf := make([]byte, 1<<20) var i byte @@ -4617,13 +4348,12 @@ func TestServerWriteDoesNotRetainBufferAfterReturn(t *testing.T) { return } } - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) res, err := tr.RoundTrip(req) if err != nil { t.Fatal(err) @@ -4639,7 +4369,7 @@ func TestServerWriteDoesNotRetainBufferAfterReturn(t *testing.T) { // We should not access the slice after this point. func TestServerWriteDoesNotRetainBufferAfterServerClose(t *testing.T) { donec := make(chan struct{}, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { donec <- struct{}{} defer close(donec) buf := make([]byte, 1<<20) @@ -4654,20 +4384,19 @@ func TestServerWriteDoesNotRetainBufferAfterServerClose(t *testing.T) { return } } - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) res, err := tr.RoundTrip(req) if err != nil { t.Fatal(err) } defer res.Body.Close() <-donec - st.ts.Config.Close() + ts.Config.Close() <-donec } @@ -4694,9 +4423,7 @@ func TestServerMaxHandlerGoroutines(t *testing.T) { }) defer st.Close() - st.writePreface() - st.writeInitialSettings() - st.writeSettingsAck() + st.greet() // Make maxHandlers concurrent requests. // Reset them all, but only after the handler goroutines have started. @@ -4763,23 +4490,244 @@ func TestServerMaxHandlerGoroutines(t *testing.T) { st.fr.WriteRSTStream(streamID, ErrCodeCancel) streamID += 2 } -Frames: + fr := readFrame[*GoAwayFrame](t, st) + if fr.ErrCode != ErrCodeEnhanceYourCalm { + t.Errorf("err code = %v; want %v", fr.ErrCode, ErrCodeEnhanceYourCalm) + } + + for _, s := range stops { + close(s) + } +} + +func TestServerContinuationFlood(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + fmt.Println(r.Header) + }, func(s *http.Server) { + s.MaxHeaderBytes = 4096 + }) + defer st.Close() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + }) + for i := 0; i < 1000; i++ { + st.fr.WriteContinuation(1, false, st.encodeHeaderRaw( + fmt.Sprintf("x-%v", i), "1234567890", + )) + } + st.fr.WriteContinuation(1, true, st.encodeHeaderRaw( + "x-last-header", "1", + )) + for { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) + f := st.readFrame() + if f == nil { + break } switch f := f.(type) { + case *HeadersFrame: + t.Fatalf("received HEADERS frame; want GOAWAY and a closed connection") case *GoAwayFrame: - if f.ErrCode != ErrCodeEnhanceYourCalm { - t.Errorf("err code = %v; want %v", f.ErrCode, ErrCodeEnhanceYourCalm) + // We might not see the GOAWAY (see below), but if we do it should + // indicate that the server processed this request so the client doesn't + // attempt to retry it. 
+ if got, want := f.LastStreamID, uint32(1); got != want { + t.Errorf("received GOAWAY with LastStreamId %v, want %v", got, want) } - break Frames - default: + + } + } + // We expect to have seen a GOAWAY before the connection closes, + // but the server will close the connection after one second + // whether or not it has finished sending the GOAWAY. On windows-amd64-race + // builders, this fairly consistently results in the connection closing without + // the GOAWAY being sent. + // + // Since the server's behavior is inherently racy here and the important thing + // is that the connection is closed, don't check for the GOAWAY having been sent. +} + +func TestServerContinuationAfterInvalidHeader(t *testing.T) { + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + fmt.Println(r.Header) + }) + defer st.Close() + + st.greet() + + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + }) + st.fr.WriteContinuation(1, false, st.encodeHeaderRaw( + "x-invalid-header", "\x00", + )) + st.fr.WriteContinuation(1, true, st.encodeHeaderRaw( + "x-valid-header", "1", + )) + + var sawGoAway bool + for { + f := st.readFrame() + if f == nil { + break + } + switch f.(type) { + case *GoAwayFrame: + sawGoAway = true + case *HeadersFrame: + t.Fatalf("received HEADERS frame; want GOAWAY") } } + if !sawGoAway { + t.Errorf("connection closed with no GOAWAY frame; want one") + } +} - for _, s := range stops { - close(s) +func TestServerUpgradeRequestPrefaceFailure(t *testing.T) { + // An h2c upgrade request fails when the client preface is not as expected. + s2 := &Server{ + // Setting IdleTimeout triggers #67168. + IdleTimeout: 60 * time.Minute, + } + c1, c2 := net.Pipe() + donec := make(chan struct{}) + go func() { + defer close(donec) + s2.ServeConn(c1, &ServeConnOpts{ + UpgradeRequest: httptest.NewRequest("GET", "/", nil), + }) + }() + // The server expects to see the HTTP/2 preface, + // but we close the connection instead. + c2.Close() + <-donec +} + +// Issue 67036: A stream error should result in the handler's request context being canceled. +func TestServerRequestCancelOnError(t *testing.T) { + recvc := make(chan struct{}) // handler has started + donec := make(chan struct{}) // handler has finished + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + close(recvc) + <-r.Context().Done() + close(donec) + }) + defer st.Close() + + st.greet() + + // Client sends request headers, handler starts. + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + <-recvc + + // Client sends an invalid second set of request headers. + // The stream is reset. + // The handler's context is canceled, and the handler exits. 
+ st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + <-donec +} + +func TestServerSetReadWriteDeadlineRace(t *testing.T) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + ctl := http.NewResponseController(w) + ctl.SetReadDeadline(time.Now().Add(3600 * time.Second)) + ctl.SetWriteDeadline(time.Now().Add(3600 * time.Second)) + }) + resp, err := ts.Client().Get(ts.URL) + if err != nil { + t.Fatal(err) + } + resp.Body.Close() +} + +func TestServerWriteByteTimeout(t *testing.T) { + const timeout = 1 * time.Second + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + w.Write(make([]byte, 100)) + }, func(s *Server) { + s.WriteByteTimeout = timeout + }) + st.greet() + + st.cc.(*synctestNetConn).SetReadBufferSize(1) // write one byte at a time + st.writeHeaders(HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(), + EndStream: true, + EndHeaders: true, + }) + + // Read a few bytes, staying just under WriteByteTimeout. + for i := 0; i < 10; i++ { + st.advance(timeout - 1) + if n, err := st.cc.Read(make([]byte, 1)); n != 1 || err != nil { + t.Fatalf("read %v: %v, %v; want 1, nil", i, n, err) + } } + + // Wait for WriteByteTimeout. + // The connection should close. + st.advance(1 * time.Second) // timeout after writing one byte + st.advance(1 * time.Second) // timeout after failing to write any more bytes + st.wantClosed() +} + +func TestServerPingSent(t *testing.T) { + const readIdleTimeout = 15 * time.Second + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(s *Server) { + s.ReadIdleTimeout = readIdleTimeout + }) + st.greet() + + st.wantIdle() + + st.advance(readIdleTimeout) + _ = readFrame[*PingFrame](t, st) + st.wantIdle() + + st.advance(14 * time.Second) + st.wantIdle() + st.advance(1 * time.Second) + st.wantClosed() +} + +func TestServerPingResponded(t *testing.T) { + const readIdleTimeout = 15 * time.Second + st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + }, func(s *Server) { + s.ReadIdleTimeout = readIdleTimeout + }) + st.greet() + + st.wantIdle() + + st.advance(readIdleTimeout) + pf := readFrame[*PingFrame](t, st) + st.wantIdle() + + st.advance(14 * time.Second) + st.wantIdle() + + st.writePing(true, pf.Data) + + st.advance(2 * time.Second) + st.wantIdle() } diff --git a/pkg/http2/sync_test.go b/pkg/http2/sync_test.go new file mode 100644 index 0000000..aeddbd6 --- /dev/null +++ b/pkg/http2/sync_test.go @@ -0,0 +1,293 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "context" + "fmt" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +// A synctestGroup synchronizes between a set of cooperating goroutines. +type synctestGroup struct { + mu sync.Mutex + gids map[int]bool + now time.Time + timers map[*fakeTimer]struct{} +} + +type goroutine struct { + id int + parent int + state string +} + +// newSynctest creates a new group with the synthetic clock set the provided time. +func newSynctest(now time.Time) *synctestGroup { + return &synctestGroup{ + gids: map[int]bool{ + currentGoroutine(): true, + }, + now: now, + } +} + +// Join adds the current goroutine to the group. 
+func (g *synctestGroup) Join() { + g.mu.Lock() + defer g.mu.Unlock() + g.gids[currentGoroutine()] = true +} + +// Count returns the number of goroutines in the group. +func (g *synctestGroup) Count() int { + gs := stacks(true) + count := 0 + for _, gr := range gs { + if !g.gids[gr.id] && !g.gids[gr.parent] { + continue + } + count++ + } + return count +} + +// Close calls t.Fatal if the group contains any running goroutines. +func (g *synctestGroup) Close(t testing.TB) { + if count := g.Count(); count != 1 { + buf := make([]byte, 16*1024) + n := runtime.Stack(buf, true) + t.Logf("stacks:\n%s", buf[:n]) + t.Fatalf("%v goroutines still running after test completed, expect 1", count) + } +} + +// Wait blocks until every goroutine in the group and their direct children are idle. +func (g *synctestGroup) Wait() { + for i := 0; ; i++ { + if g.idle() { + return + } + runtime.Gosched() + } +} + +func (g *synctestGroup) idle() bool { + gs := stacks(true) + g.mu.Lock() + defer g.mu.Unlock() + for _, gr := range gs[1:] { + if !g.gids[gr.id] && !g.gids[gr.parent] { + continue + } + // From runtime/runtime2.go. + switch gr.state { + case "IO wait": + case "chan receive (nil chan)": + case "chan send (nil chan)": + case "select": + case "select (no cases)": + case "chan receive": + case "chan send": + case "sync.Cond.Wait": + case "sync.Mutex.Lock": + case "sync.RWMutex.RLock": + case "sync.RWMutex.Lock": + default: + return false + } + } + return true +} + +func currentGoroutine() int { + s := stacks(false) + return s[0].id +} + +func stacks(all bool) []goroutine { + buf := make([]byte, 16*1024) + for { + n := runtime.Stack(buf, all) + if n < len(buf) { + buf = buf[:n] + break + } + buf = make([]byte, len(buf)*2) + } + + var goroutines []goroutine + for _, gs := range strings.Split(string(buf), "\n\n") { + skip, rest, ok := strings.Cut(gs, "goroutine ") + if skip != "" || !ok { + panic(fmt.Errorf("1 unparsable goroutine stack:\n%s", gs)) + } + ids, rest, ok := strings.Cut(rest, " [") + if !ok { + panic(fmt.Errorf("2 unparsable goroutine stack:\n%s", gs)) + } + id, err := strconv.Atoi(ids) + if err != nil { + panic(fmt.Errorf("3 unparsable goroutine stack:\n%s", gs)) + } + state, rest, ok := strings.Cut(rest, "]") + var parent int + _, rest, ok = strings.Cut(rest, "\ncreated by ") + if ok && strings.Contains(rest, " in goroutine ") { + _, rest, ok := strings.Cut(rest, " in goroutine ") + if !ok { + panic(fmt.Errorf("4 unparsable goroutine stack:\n%s", gs)) + } + parents, rest, ok := strings.Cut(rest, "\n") + if !ok { + panic(fmt.Errorf("5 unparsable goroutine stack:\n%s", gs)) + } + parent, err = strconv.Atoi(parents) + if err != nil { + panic(fmt.Errorf("6 unparsable goroutine stack:\n%s", gs)) + } + } + goroutines = append(goroutines, goroutine{ + id: id, + parent: parent, + state: state, + }) + } + return goroutines +} + +// AdvanceTime advances the synthetic clock by d. +func (g *synctestGroup) AdvanceTime(d time.Duration) { + defer g.Wait() + g.mu.Lock() + defer g.mu.Unlock() + g.now = g.now.Add(d) + for tm := range g.timers { + if tm.when.After(g.now) { + continue + } + tm.run() + delete(g.timers, tm) + } +} + +// Now returns the current synthetic time. +func (g *synctestGroup) Now() time.Time { + g.mu.Lock() + defer g.mu.Unlock() + return g.now +} + +// TimeUntilEvent returns the amount of time until the next scheduled timer. 
+func (g *synctestGroup) TimeUntilEvent() (d time.Duration, scheduled bool) { + g.mu.Lock() + defer g.mu.Unlock() + for tm := range g.timers { + if dd := tm.when.Sub(g.now); !scheduled || dd < d { + d = dd + scheduled = true + } + } + return d, scheduled +} + +// Sleep is time.Sleep, but using synthetic time. +func (g *synctestGroup) Sleep(d time.Duration) { + tm := g.NewTimer(d) + <-tm.C() +} + +// NewTimer is time.NewTimer, but using synthetic time. +func (g *synctestGroup) NewTimer(d time.Duration) Timer { + return g.addTimer(d, &fakeTimer{ + ch: make(chan time.Time), + }) +} + +// AfterFunc is time.AfterFunc, but using synthetic time. +func (g *synctestGroup) AfterFunc(d time.Duration, f func()) Timer { + return g.addTimer(d, &fakeTimer{ + f: f, + }) +} + +// ContextWithTimeout is context.WithTimeout, but using synthetic time. +func (g *synctestGroup) ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + ctx, cancel := context.WithCancel(ctx) + tm := g.AfterFunc(d, cancel) + return ctx, func() { + tm.Stop() + cancel() + } +} + +func (g *synctestGroup) addTimer(d time.Duration, tm *fakeTimer) *fakeTimer { + g.mu.Lock() + defer g.mu.Unlock() + tm.g = g + tm.when = g.now.Add(d) + if g.timers == nil { + g.timers = make(map[*fakeTimer]struct{}) + } + if tm.when.After(g.now) { + g.timers[tm] = struct{}{} + } else { + tm.run() + } + return tm +} + +type Timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +type fakeTimer struct { + g *synctestGroup + when time.Time + ch chan time.Time + f func() +} + +func (tm *fakeTimer) run() { + if tm.ch != nil { + tm.ch <- tm.g.now + } else { + go func() { + tm.g.Join() + tm.f() + }() + } +} + +func (tm *fakeTimer) C() <-chan time.Time { return tm.ch } + +func (tm *fakeTimer) Reset(d time.Duration) bool { + tm.g.mu.Lock() + defer tm.g.mu.Unlock() + _, stopped := tm.g.timers[tm] + if d <= 0 { + delete(tm.g.timers, tm) + tm.run() + } else { + tm.when = tm.g.now.Add(d) + tm.g.timers[tm] = struct{}{} + } + return stopped +} + +func (tm *fakeTimer) Stop() bool { + tm.g.mu.Lock() + defer tm.g.mu.Unlock() + _, stopped := tm.g.timers[tm] + delete(tm.g.timers, tm) + return stopped +} diff --git a/pkg/http2/testdata/draft-ietf-httpbis-http2.xml b/pkg/http2/testdata/draft-ietf-httpbis-http2.xml deleted file mode 100644 index 39d756d..0000000 --- a/pkg/http2/testdata/draft-ietf-httpbis-http2.xml +++ /dev/null @@ -1,5021 +0,0 @@ - - - - - - - - - - - - - - - - - - - Hypertext Transfer Protocol version 2 - - - Twist -
- [author contact information: mbelshe@chromium.org; Google, Inc, fenix@google.com; Mozilla, 331 E Evelyn Street, Mountain View, CA 94041, US, martin.thomson@gmail.com]
- - - Applications - HTTPbis - HTTP - SPDY - Web - - - - This specification describes an optimized expression of the semantics of the Hypertext - Transfer Protocol (HTTP). HTTP/2 enables a more efficient use of network resources and a - reduced perception of latency by introducing header field compression and allowing multiple - concurrent messages on the same connection. It also introduces unsolicited push of - representations from servers to clients. - - - This specification is an alternative to, but does not obsolete, the HTTP/1.1 message syntax. - HTTP's existing semantics remain unchanged. - - - - - - Discussion of this draft takes place on the HTTPBIS working group mailing list - (ietf-http-wg@w3.org), which is archived at . - - - Working Group information can be found at ; that specific to HTTP/2 are at . - - - The changes in this draft are summarized in . - - - -
- - -
- - - The Hypertext Transfer Protocol (HTTP) is a wildly successful protocol. However, the - HTTP/1.1 message format () has - several characteristics that have a negative overall effect on application performance - today. - - - In particular, HTTP/1.0 allowed only one request to be outstanding at a time on a given - TCP connection. HTTP/1.1 added request pipelining, but this only partially addressed - request concurrency and still suffers from head-of-line blocking. Therefore, HTTP/1.1 - clients that need to make many requests typically use multiple connections to a server in - order to achieve concurrency and thereby reduce latency. - - - Furthermore, HTTP header fields are often repetitive and verbose, causing unnecessary - network traffic, as well as causing the initial TCP congestion - window to quickly fill. This can result in excessive latency when multiple requests are - made on a new TCP connection. - - - HTTP/2 addresses these issues by defining an optimized mapping of HTTP's semantics to an - underlying connection. Specifically, it allows interleaving of request and response - messages on the same connection and uses an efficient coding for HTTP header fields. It - also allows prioritization of requests, letting more important requests complete more - quickly, further improving performance. - - - The resulting protocol is more friendly to the network, because fewer TCP connections can - be used in comparison to HTTP/1.x. This means less competition with other flows, and - longer-lived connections, which in turn leads to better utilization of available network - capacity. - - - Finally, HTTP/2 also enables more efficient processing of messages through use of binary - message framing. - -
- -
- - HTTP/2 provides an optimized transport for HTTP semantics. HTTP/2 supports all of the core - features of HTTP/1.1, but aims to be more efficient in several ways. - - - The basic protocol unit in HTTP/2 is a frame. Each frame - type serves a different purpose. For example, HEADERS and - DATA frames form the basis of HTTP requests and - responses; other frame types like SETTINGS, - WINDOW_UPDATE, and PUSH_PROMISE are used in support of other - HTTP/2 features. - - - Multiplexing of requests is achieved by having each HTTP request-response exchange - associated with its own stream. Streams are largely - independent of each other, so a blocked or stalled request or response does not prevent - progress on other streams. - - - Flow control and prioritization ensure that it is possible to efficiently use multiplexed - streams. Flow control helps to ensure that only data that - can be used by a receiver is transmitted. Prioritization ensures that limited resources can be directed - to the most important streams first. - - - HTTP/2 adds a new interaction mode, whereby a server can push - responses to a client. Server push allows a server to speculatively send a client - data that the server anticipates the client will need, trading off some network usage - against a potential latency gain. The server does this by synthesizing a request, which it - sends as a PUSH_PROMISE frame. The server is then able to send a response to - the synthetic request on a separate stream. - - - Frames that contain HTTP header fields are compressed. - HTTP requests can be highly redundant, so compression can reduce the size of requests and - responses significantly. - - -
- - The HTTP/2 specification is split into four parts: - - - Starting HTTP/2 covers how an HTTP/2 connection is - initiated. - - - The framing and streams layers describe the way HTTP/2 frames are - structured and formed into multiplexed streams. - - - Frame and error - definitions include details of the frame and error types used in HTTP/2. - - - HTTP mappings and additional - requirements describe how HTTP semantics are expressed using frames and - streams. - - - - - While some of the frame and stream layer concepts are isolated from HTTP, this - specification does not define a completely generic framing layer. The framing and streams - layers are tailored to the needs of the HTTP protocol and server push. - -
- -
- - The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD - NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as - described in RFC 2119. - - - All numeric values are in network byte order. Values are unsigned unless otherwise - indicated. Literal values are provided in decimal or hexadecimal as appropriate. - Hexadecimal literals are prefixed with 0x to distinguish them - from decimal literals. - - - The following terms are used: - - - The endpoint initiating the HTTP/2 connection. - - - A transport-layer connection between two endpoints. - - - An error that affects the entire HTTP/2 connection. - - - Either the client or server of the connection. - - - The smallest unit of communication within an HTTP/2 connection, consisting of a header - and a variable-length sequence of octets structured according to the frame type. - - - An endpoint. When discussing a particular endpoint, "peer" refers to the endpoint - that is remote to the primary subject of discussion. - - - An endpoint that is receiving frames. - - - An endpoint that is transmitting frames. - - - The endpoint which did not initiate the HTTP/2 connection. - - - A bi-directional flow of frames across a virtual channel within the HTTP/2 connection. - - - An error on the individual HTTP/2 stream. - - - - - Finally, the terms "gateway", "intermediary", "proxy", and "tunnel" are defined - in . - -
-
- -
- - An HTTP/2 connection is an application layer protocol running on top of a TCP connection - (). The client is the TCP connection initiator. - - - HTTP/2 uses the same "http" and "https" URI schemes used by HTTP/1.1. HTTP/2 shares the same - default port numbers: 80 for "http" URIs and 443 for "https" URIs. As a result, - implementations processing requests for target resource URIs like http://example.org/foo or https://example.com/bar are required to first discover whether the - upstream server (the immediate peer to which the client wishes to establish a connection) - supports HTTP/2. - - - - The means by which support for HTTP/2 is determined is different for "http" and "https" - URIs. Discovery for "http" URIs is described in . Discovery - for "https" URIs is described in . - - -
- - The protocol defined in this document has two identifiers. - - - - The string "h2" identifies the protocol where HTTP/2 uses TLS. This identifier is used in the TLS application layer protocol negotiation extension (ALPN) - field and any place that HTTP/2 over TLS is identified. - - - The "h2" string is serialized into an ALPN protocol identifier as the two octet - sequence: 0x68, 0x32. - - - - - The string "h2c" identifies the protocol where HTTP/2 is run over cleartext TCP. - This identifier is used in the HTTP/1.1 Upgrade header field and any place that - HTTP/2 over TCP is identified. - - - - - - Negotiating "h2" or "h2c" implies the use of the transport, security, framing and message - semantics described in this document. - - - RFC Editor's Note: please remove the remainder of this section prior to the - publication of a final version of this document. - - - Only implementations of the final, published RFC can identify themselves as "h2" or "h2c". - Until such an RFC exists, implementations MUST NOT identify themselves using these - strings. - - - Examples and text throughout the rest of this document use "h2" as a matter of - editorial convenience only. Implementations of draft versions MUST NOT identify using - this string. - - - Implementations of draft versions of the protocol MUST add the string "-" and the - corresponding draft number to the identifier. For example, draft-ietf-httpbis-http2-11 - over TLS is identified using the string "h2-11". - - - Non-compatible experiments that are based on these draft versions MUST append the string - "-" and an experiment name to the identifier. For example, an experimental implementation - of packet mood-based encoding based on draft-ietf-httpbis-http2-09 might identify itself - as "h2-09-emo". Note that any label MUST conform to the "token" syntax defined in - . Experimenters are - encouraged to coordinate their experiments on the ietf-http-wg@w3.org mailing list. - -
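As a minimal illustration of the "h2" identifier in practice, the standalone Go sketch below (not taken from this change) shows a client offering HTTP/2 via the TLS ALPN extension using the standard crypto/tls package; the dial address is a placeholder.

package main

import (
    "crypto/tls"
    "fmt"
)

func main() {
    // Offer "h2" first and fall back to HTTP/1.1 if the server declines.
    cfg := &tls.Config{
        NextProtos: []string{"h2", "http/1.1"},
    }
    conn, err := tls.Dial("tcp", "example.com:443", cfg) // placeholder address
    if err != nil {
        fmt.Println("dial:", err)
        return
    }
    defer conn.Close()
    // The negotiated protocol is available once the handshake completes.
    if conn.ConnectionState().NegotiatedProtocol == "h2" {
        fmt.Println("server selected HTTP/2 over TLS")
    }
}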
- -
- - A client that makes a request for an "http" URI without prior knowledge about support for - HTTP/2 uses the HTTP Upgrade mechanism (). The client makes an HTTP/1.1 request that includes an Upgrade - header field identifying HTTP/2 with the "h2c" token. The HTTP/1.1 request MUST include - exactly one HTTP2-Settings header field. - -
- For example: [an HTTP/1.1 upgrade request carrying Upgrade: h2c and an HTTP2-Settings header field]
- - Requests that contain an entity body MUST be sent in their entirety before the client can - send HTTP/2 frames. This means that a large request entity can block the use of the - connection until it is completely sent. - - - If concurrency of an initial request with subsequent requests is important, an OPTIONS - request can be used to perform the upgrade to HTTP/2, at the cost of an additional - round-trip. - - - A server that does not support HTTP/2 can respond to the request as though the Upgrade - header field were absent: - -
- -HTTP/1.1 200 OK -Content-Length: 243 -Content-Type: text/html - -... - -
- - A server MUST ignore a "h2" token in an Upgrade header field. Presence of a token with - "h2" implies HTTP/2 over TLS, which is instead negotiated as described in . - - - A server that supports HTTP/2 can accept the upgrade with a 101 (Switching Protocols) - response. After the empty line that terminates the 101 response, the server can begin - sending HTTP/2 frames. These frames MUST include a response to the request that initiated - the Upgrade. - - -
- - For example: - - -HTTP/1.1 101 Switching Protocols -Connection: Upgrade -Upgrade: h2c - -[ HTTP/2 connection ... - -
- - The first HTTP/2 frame sent by the server is a SETTINGS frame () as the server connection preface (). Upon receiving the 101 response, the client sends a connection preface, which includes a - SETTINGS frame. - - - The HTTP/1.1 request that is sent prior to upgrade is assigned stream identifier 1 and is - assigned default priority values. Stream 1 is - implicitly half closed from the client toward the server, since the request is completed - as an HTTP/1.1 request. After commencing the HTTP/2 connection, stream 1 is used for the - response. - - -
- - A request that upgrades from HTTP/1.1 to HTTP/2 MUST include exactly one HTTP2-Settings header field. The HTTP2-Settings header field is a connection-specific header field - that includes parameters that govern the HTTP/2 connection, provided in anticipation of - the server accepting the request to upgrade. - -
- -
- - A server MUST NOT upgrade the connection to HTTP/2 if this header field is not present, - or if more than one is present. A server MUST NOT send this header field. - - - - The content of the HTTP2-Settings header field is the - payload of a SETTINGS frame (), encoded as a - base64url string (that is, the URL- and filename-safe Base64 encoding described in , with any trailing '=' characters omitted). The - ABNF production for token68 is - defined in . - - - Since the upgrade is only intended to apply to the immediate connection, a client - sending HTTP2-Settings MUST also send HTTP2-Settings as a connection option in the Connection header field to prevent it from being forwarded - downstream. - - - A server decodes and interprets these values as it would any other - SETTINGS frame. Acknowledgement of the - SETTINGS parameters is not necessary, since a 101 response serves as implicit - acknowledgment. Providing these values in the Upgrade request gives a client an - opportunity to provide parameters prior to receiving any frames from the server. - -
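A rough sketch of how a client might build the HTTP2-Settings value described above, using only the standard library: each setting is serialized as a 16-bit identifier and a 32-bit value (the SETTINGS frame payload layout), and the result is base64url-encoded with no trailing '=' padding. The identifiers used below (0x3 and 0x4) are illustrative.

package main

import (
    "encoding/base64"
    "encoding/binary"
    "fmt"
)

// setting is one SETTINGS entry: a 16-bit identifier and a 32-bit value,
// matching the on-the-wire layout of a SETTINGS frame payload.
type setting struct {
    id  uint16
    val uint32
}

// http2SettingsHeader encodes settings as a SETTINGS frame payload and
// returns the base64url form (no padding) used in the HTTP2-Settings header.
func http2SettingsHeader(settings []setting) string {
    buf := make([]byte, 0, 6*len(settings))
    for _, s := range settings {
        buf = binary.BigEndian.AppendUint16(buf, s.id)
        buf = binary.BigEndian.AppendUint32(buf, s.val)
    }
    return base64.RawURLEncoding.EncodeToString(buf)
}

func main() {
    // 0x3 is SETTINGS_MAX_CONCURRENT_STREAMS, 0x4 is SETTINGS_INITIAL_WINDOW_SIZE.
    v := http2SettingsHeader([]setting{{0x3, 100}, {0x4, 1 << 20}})
    fmt.Println("HTTP2-Settings:", v)
}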
-
- -
- - A client that makes a request to an "https" URI uses TLS - with the application layer protocol negotiation extension. - - - HTTP/2 over TLS uses the "h2" application token. The "h2c" token MUST NOT be sent by a - client or selected by a server. - - - Once TLS negotiation is complete, both the client and the server send a connection preface. - -
- -
- - A client can learn that a particular server supports HTTP/2 by other means. For example, - describes a mechanism for advertising this capability. - - - A client MAY immediately send HTTP/2 frames to a server that is known to support HTTP/2, - after the connection preface; a server can - identify such a connection by the presence of the connection preface. This only affects - the establishment of HTTP/2 connections over cleartext TCP; implementations that support - HTTP/2 over TLS MUST use protocol negotiation in TLS. - - - Without additional information, prior support for HTTP/2 is not a strong signal that a - given server will support HTTP/2 for future connections. For example, it is possible for - server configurations to change, for configurations to differ between instances in - clustered servers, or for network conditions to change. - -
- -
- - Upon establishment of a TCP connection and determination that HTTP/2 will be used by both - peers, each endpoint MUST send a connection preface as a final confirmation and to - establish the initial SETTINGS parameters for the HTTP/2 connection. The client and - server each send a different connection preface. - - - The client connection preface starts with a sequence of 24 octets, which in hex notation - are: - -
- -
- - (the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence - is followed by a SETTINGS frame (). The - SETTINGS frame MAY be empty. The client sends the client connection - preface immediately upon receipt of a 101 Switching Protocols response (indicating a - successful upgrade), or as the first application data octets of a TLS connection. If - starting an HTTP/2 connection with prior knowledge of server support for the protocol, the - client connection preface is sent upon connection establishment. - - - - - The client connection preface is selected so that a large proportion of HTTP/1.1 or - HTTP/1.0 servers and intermediaries do not attempt to process further frames. Note - that this does not address the concerns raised in . - - - - - The server connection preface consists of a potentially empty SETTINGS - frame () that MUST be the first frame the server sends in the - HTTP/2 connection. - - - The SETTINGS frames received from a peer as part of the connection preface - MUST be acknowledged (see ) after sending the connection - preface. - - - To avoid unnecessary latency, clients are permitted to send additional frames to the - server immediately after sending the client connection preface, without waiting to receive - the server connection preface. It is important to note, however, that the server - connection preface SETTINGS frame might include parameters that necessarily - alter how a client is expected to communicate with the server. Upon receiving the - SETTINGS frame, the client is expected to honor any parameters established. - In some configurations, it is possible for the server to transmit SETTINGS - before the client sends additional frames, providing an opportunity to avoid this issue. - - - Clients and servers MUST treat an invalid connection preface as a connection error of type - PROTOCOL_ERROR. A GOAWAY frame () - MAY be omitted in this case, since an invalid preface indicates that the peer is not using - HTTP/2. - -
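The following standalone sketch shows how a server-side reader might consume and validate the 24-octet client connection preface before reading frames; it illustrates the rule above and is not this package's implementation.

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"
)

// The 24-octet client connection preface, sent before any HTTP/2 frames.
const preface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

// readPreface consumes and validates the client preface from r.
func readPreface(r io.Reader) error {
    buf := make([]byte, len(preface))
    if _, err := io.ReadFull(r, buf); err != nil {
        return fmt.Errorf("reading preface: %w", err)
    }
    if !bytes.Equal(buf, []byte(preface)) {
        return fmt.Errorf("invalid connection preface %q", buf)
    }
    return nil
}

func main() {
    err := readPreface(strings.NewReader(preface + "rest of the connection"))
    fmt.Println("preface ok:", err == nil)
}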
-
- -
- - Once the HTTP/2 connection is established, endpoints can begin exchanging frames. - - -
- - All frames begin with a fixed 9-octet header followed by a variable-length payload. - -
- -
- - The fields of the frame header are defined as: - - - - The length of the frame payload expressed as an unsigned 24-bit integer. Values - greater than 214 (16,384) MUST NOT be sent unless the receiver has - set a larger value for SETTINGS_MAX_FRAME_SIZE. - - - The 9 octets of the frame header are not included in this value. - - - - - The 8-bit type of the frame. The frame type determines the format and semantics of - the frame. Implementations MUST ignore and discard any frame that has a type that - is unknown. - - - - - An 8-bit field reserved for frame-type specific boolean flags. - - - Flags are assigned semantics specific to the indicated frame type. Flags that have - no defined semantics for a particular frame type MUST be ignored, and MUST be left - unset (0) when sending. - - - - - A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST - remain unset (0) when sending and MUST be ignored when receiving. - - - - - A 31-bit stream identifier (see ). The value 0 is - reserved for frames that are associated with the connection as a whole as opposed to - an individual stream. - - - - - - The structure and content of the frame payload is dependent entirely on the frame type. - -
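A minimal sketch of decoding the fixed 9-octet frame header described above (24-bit length, 8-bit type, 8-bit flags, reserved bit, 31-bit stream identifier); the type and field names are invented for the example.

package main

import (
    "encoding/binary"
    "fmt"
)

// frameHeader mirrors the fixed 9-octet header layout.
type frameHeader struct {
    Length   uint32 // 24-bit payload length
    Type     uint8
    Flags    uint8
    StreamID uint32 // 31 bits; the high bit is reserved and ignored
}

// parseFrameHeader decodes the 9-octet header. It does not read the payload.
func parseFrameHeader(b [9]byte) frameHeader {
    return frameHeader{
        Length:   uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]),
        Type:     b[3],
        Flags:    b[4],
        StreamID: binary.BigEndian.Uint32(b[5:9]) & 0x7fffffff, // clear the reserved bit
    }
}

func main() {
    // A SETTINGS frame header: length 0, type 0x4, no flags, stream 0.
    h := parseFrameHeader([9]byte{0, 0, 0, 0x4, 0, 0, 0, 0, 0})
    fmt.Printf("%+v\n", h)
}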
- -
- - The size of a frame payload is limited by the maximum size that a receiver advertises in - the SETTINGS_MAX_FRAME_SIZE setting. This setting can have any value - between 214 (16,384) and 224-1 (16,777,215) octets, - inclusive. - - - All implementations MUST be capable of receiving and minimally processing frames up to - 214 octets in length, plus the 9 octet frame - header. The size of the frame header is not included when describing frame sizes. - - - Certain frame types, such as PING, impose additional limits - on the amount of payload data allowed. - - - - - If a frame size exceeds any defined limit, or is too small to contain mandatory frame - data, the endpoint MUST send a FRAME_SIZE_ERROR error. A frame size error - in a frame that could alter the state of the entire connection MUST be treated as a connection error; this includes any frame carrying - a header block (that is, HEADERS, - PUSH_PROMISE, and CONTINUATION), SETTINGS, - and any WINDOW_UPDATE frame with a stream identifier of 0. - - - Endpoints are not obligated to use all available space in a frame. Responsiveness can be - improved by using frames that are smaller than the permitted maximum size. Sending large - frames can result in delays in sending time-sensitive frames (such - RST_STREAM, WINDOW_UPDATE, or PRIORITY) - which if blocked by the transmission of a large frame, could affect performance. - -
- -
- - Just as in HTTP/1, a header field in HTTP/2 is a name with one or more associated values. - They are used within HTTP request and response messages as well as server push operations - (see ). - - - Header lists are collections of zero or more header fields. When transmitted over a - connection, a header list is serialized into a header block using HTTP Header Compression. The serialized header block is then - divided into one or more octet sequences, called header block fragments, and transmitted - within the payload of HEADERS, PUSH_PROMISE or CONTINUATION frames. - - - The Cookie header field is treated specially by the HTTP - mapping (see ). - - - A receiving endpoint reassembles the header block by concatenating its fragments, then - decompresses the block to reconstruct the header list. - - - A complete header block consists of either: - - - a single HEADERS or PUSH_PROMISE frame, - with the END_HEADERS flag set, or - - - a HEADERS or PUSH_PROMISE frame with the END_HEADERS - flag cleared and one or more CONTINUATION frames, - where the last CONTINUATION frame has the END_HEADERS flag set. - - - - - Header compression is stateful. One compression context and one decompression context is - used for the entire connection. Each header block is processed as a discrete unit. - Header blocks MUST be transmitted as a contiguous sequence of frames, with no interleaved - frames of any other type or from any other stream. The last frame in a sequence of - HEADERS or CONTINUATION frames MUST have the END_HEADERS - flag set. The last frame in a sequence of PUSH_PROMISE or - CONTINUATION frames MUST have the END_HEADERS flag set. This allows a - header block to be logically equivalent to a single frame. - - - Header block fragments can only be sent as the payload of HEADERS, - PUSH_PROMISE or CONTINUATION frames, because these frames - carry data that can modify the compression context maintained by a receiver. An endpoint - receiving HEADERS, PUSH_PROMISE or - CONTINUATION frames MUST reassemble header blocks and perform decompression - even if the frames are to be discarded. A receiver MUST terminate the connection with a - connection error of type - COMPRESSION_ERROR if it does not decompress a header block. - -
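To make the compression round trip concrete, the sketch below encodes a small header list into a header block and decodes it again with golang.org/x/net/http2/hpack. Per the text above, one encoder context and one decoder context would persist for the whole connection; this toy example does not model that lifetime.

package main

import (
    "bytes"
    "fmt"

    "golang.org/x/net/http2/hpack"
)

func main() {
    // Encode a header list into a header block.
    var buf bytes.Buffer
    enc := hpack.NewEncoder(&buf)
    enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
    enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
    enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "sketch"})

    // The receiver reassembles the block (possibly from several fragments)
    // and decompresses it with a matching decoder.
    dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
        fmt.Printf("%s: %s\n", f.Name, f.Value)
    })
    if _, err := dec.Write(buf.Bytes()); err != nil {
        fmt.Println("decode:", err)
    }
    dec.Close()
}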
-
- -
- - A "stream" is an independent, bi-directional sequence of frames exchanged between the client - and server within an HTTP/2 connection. Streams have several important characteristics: - - - A single HTTP/2 connection can contain multiple concurrently open streams, with either - endpoint interleaving frames from multiple streams. - - - Streams can be established and used unilaterally or shared by either the client or - server. - - - Streams can be closed by either endpoint. - - - The order in which frames are sent on a stream is significant. Recipients process frames - in the order they are received. In particular, the order of HEADERS, - and DATA frames is semantically significant. - - - Streams are identified by an integer. Stream identifiers are assigned to streams by the - endpoint initiating the stream. - - - - -
- - The lifecycle of a stream is shown in the figure below. - -
- [stream lifecycle state diagram] - H: HEADERS frame (with implied CONTINUATIONs) - PP: PUSH_PROMISE frame (with implied CONTINUATIONs) - ES: END_STREAM flag - R: RST_STREAM frame -
- - - Note that this diagram shows stream state transitions and the frames and flags that affect - those transitions only. In this regard, CONTINUATION frames do not result - in state transitions; they are effectively part of the HEADERS or - PUSH_PROMISE that they follow. For this purpose, the END_STREAM flag is - processed as a separate event to the frame that bears it; a HEADERS frame - with the END_STREAM flag set can cause two state transitions. - - - Both endpoints have a subjective view of the state of a stream that could be different - when frames are in transit. Endpoints do not coordinate the creation of streams; they are - created unilaterally by either endpoint. The negative consequences of a mismatch in - states are limited to the "closed" state after sending RST_STREAM, where - frames might be received for some time after closing. - - - Streams have the following states: - - - - - - All streams start in the "idle" state. In this state, no frames have been - exchanged. - - - The following transitions are valid from this state: - - - Sending or receiving a HEADERS frame causes the stream to become - "open". The stream identifier is selected as described in . The same HEADERS frame can also - cause a stream to immediately become "half closed". - - - Sending a PUSH_PROMISE frame marks the associated stream for - later use. The stream state for the reserved stream transitions to "reserved - (local)". - - - Receiving a PUSH_PROMISE frame marks the associated stream as - reserved by the remote peer. The state of the stream becomes "reserved - (remote)". - - - - - Receiving any frames other than HEADERS or - PUSH_PROMISE on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (local)" state is one that has been promised by sending a - PUSH_PROMISE frame. A PUSH_PROMISE frame reserves an - idle stream by associating the stream with an open stream that was initiated by the - remote peer (see ). - - - In this state, only the following transitions are possible: - - - The endpoint can send a HEADERS frame. This causes the stream to - open in a "half closed (remote)" state. - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MUST NOT send any type of frame other than HEADERS or - RST_STREAM in this state. - - - A PRIORITY frame MAY be received in this state. Receiving any type - of frame other than RST_STREAM or PRIORITY on a stream - in this state MUST be treated as a connection - error of type PROTOCOL_ERROR. - - - - - - - A stream in the "reserved (remote)" state has been reserved by a remote peer. - - - In this state, only the following transitions are possible: - - - Receiving a HEADERS frame causes the stream to transition to - "half closed (local)". - - - Either endpoint can send a RST_STREAM frame to cause the stream - to become "closed". This releases the stream reservation. - - - - - An endpoint MAY send a PRIORITY frame in this state to reprioritize - the reserved stream. An endpoint MUST NOT send any type of frame other than - RST_STREAM, WINDOW_UPDATE, or PRIORITY - in this state. - - - Receiving any type of frame other than HEADERS or - RST_STREAM on a stream in this state MUST be treated as a connection error of type - PROTOCOL_ERROR. - - - - - - - A stream in the "open" state may be used by both peers to send frames of any type. 
- In this state, sending peers observe advertised stream - level flow control limits. - - - From this state either endpoint can send a frame with an END_STREAM flag set, which - causes the stream to transition into one of the "half closed" states: an endpoint - sending an END_STREAM flag causes the stream state to become "half closed (local)"; - an endpoint receiving an END_STREAM flag causes the stream state to become "half - closed (remote)". - - - Either endpoint can send a RST_STREAM frame from this state, causing - it to transition immediately to "closed". - - - - - - - A stream that is in the "half closed (local)" state cannot be used for sending - frames. Only WINDOW_UPDATE, PRIORITY and - RST_STREAM frames can be sent in this state. - - - A stream transitions from this state to "closed" when a frame that contains an - END_STREAM flag is received, or when either peer sends a RST_STREAM - frame. - - - A receiver can ignore WINDOW_UPDATE frames in this state, which might - arrive for a short period after a frame bearing the END_STREAM flag is sent. - - - PRIORITY frames received in this state are used to reprioritize - streams that depend on the current stream. - - - - - - - A stream that is "half closed (remote)" is no longer being used by the peer to send - frames. In this state, an endpoint is no longer obligated to maintain a receiver - flow control window if it performs flow control. - - - If an endpoint receives additional frames for a stream that is in this state, other - than WINDOW_UPDATE, PRIORITY or - RST_STREAM, it MUST respond with a stream error of type - STREAM_CLOSED. - - - A stream that is "half closed (remote)" can be used by the endpoint to send frames - of any type. In this state, the endpoint continues to observe advertised stream level flow control limits. - - - A stream can transition from this state to "closed" by sending a frame that contains - an END_STREAM flag, or when either peer sends a RST_STREAM frame. - - - - - - - The "closed" state is the terminal state. - - - An endpoint MUST NOT send frames other than PRIORITY on a closed - stream. An endpoint that receives any frame other than PRIORITY - after receiving a RST_STREAM MUST treat that as a stream error of type - STREAM_CLOSED. Similarly, an endpoint that receives any frames after - receiving a frame with the END_STREAM flag set MUST treat that as a connection error of type - STREAM_CLOSED, unless the frame is permitted as described below. - - - WINDOW_UPDATE or RST_STREAM frames can be received in - this state for a short period after a DATA or HEADERS - frame containing an END_STREAM flag is sent. Until the remote peer receives and - processes RST_STREAM or the frame bearing the END_STREAM flag, it - might send frames of these types. Endpoints MUST ignore - WINDOW_UPDATE or RST_STREAM frames received in this - state, though endpoints MAY choose to treat frames that arrive a significant time - after sending END_STREAM as a connection - error of type PROTOCOL_ERROR. - - - PRIORITY frames can be sent on closed streams to prioritize streams - that are dependent on the closed stream. Endpoints SHOULD process - PRIORITY frame, though they can be ignored if the stream has been - removed from the dependency tree (see ). - - - If this state is reached as a result of sending a RST_STREAM frame, - the peer that receives the RST_STREAM might have already sent - or - enqueued for sending - frames on the stream that cannot be withdrawn. 
An endpoint - MUST ignore frames that it receives on closed streams after it has sent a - RST_STREAM frame. An endpoint MAY choose to limit the period over - which it ignores frames and treat frames that arrive after this time as being in - error. - - - Flow controlled frames (i.e., DATA) received after sending - RST_STREAM are counted toward the connection flow control window. - Even though these frames might be ignored, because they are sent before the sender - receives the RST_STREAM, the sender will consider the frames to count - against the flow control window. - - - An endpoint might receive a PUSH_PROMISE frame after it sends - RST_STREAM. PUSH_PROMISE causes a stream to become - "reserved" even if the associated stream has been reset. Therefore, a - RST_STREAM is needed to close an unwanted promised stream. - - - - - - In the absence of more specific guidance elsewhere in this document, implementations - SHOULD treat the receipt of a frame that is not expressly permitted in the description of - a state as a connection error of type - PROTOCOL_ERROR. Frame of unknown types are ignored. - - - An example of the state transitions for an HTTP request/response exchange can be found in - . An example of the state transitions for server push can be - found in and . - - -
- - Streams are identified with an unsigned 31-bit integer. Streams initiated by a client - MUST use odd-numbered stream identifiers; those initiated by the server MUST use - even-numbered stream identifiers. A stream identifier of zero (0x0) is used for - connection control messages; the stream identifier zero cannot be used to establish a - new stream. - - - HTTP/1.1 requests that are upgraded to HTTP/2 (see ) are - responded to with a stream identifier of one (0x1). After the upgrade - completes, stream 0x1 is "half closed (local)" to the client. Therefore, stream 0x1 - cannot be selected as a new stream identifier by a client that upgrades from HTTP/1.1. - - - The identifier of a newly established stream MUST be numerically greater than all - streams that the initiating endpoint has opened or reserved. This governs streams that - are opened using a HEADERS frame and streams that are reserved using - PUSH_PROMISE. An endpoint that receives an unexpected stream identifier - MUST respond with a connection error of - type PROTOCOL_ERROR. - - - The first use of a new stream identifier implicitly closes all streams in the "idle" - state that might have been initiated by that peer with a lower-valued stream identifier. - For example, if a client sends a HEADERS frame on stream 7 without ever - sending a frame on stream 5, then stream 5 transitions to the "closed" state when the - first frame for stream 7 is sent or received. - - - Stream identifiers cannot be reused. Long-lived connections can result in an endpoint - exhausting the available range of stream identifiers. A client that is unable to - establish a new stream identifier can establish a new connection for new streams. A - server that is unable to establish a new stream identifier can send a - GOAWAY frame so that the client is forced to open a new connection for - new streams. - -
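A small sketch, with invented names, of the identifier rules above as an endpoint might check them for peer-initiated streams: non-zero, correct parity, and strictly increasing.

package main

import (
    "errors"
    "fmt"
)

// streamIDTracker enforces the stream identifier rules for streams opened by
// the peer: client-initiated streams are odd, server-initiated streams are
// even, and each new identifier must exceed all previously used ones.
type streamIDTracker struct {
    peerIsClient bool
    lastPeerID   uint32
}

func (t *streamIDTracker) checkNew(id uint32) error {
    if id == 0 {
        return errors.New("stream 0 is reserved for connection control")
    }
    if odd := id%2 == 1; odd != t.peerIsClient {
        return errors.New("wrong stream identifier parity for peer")
    }
    if id <= t.lastPeerID {
        return errors.New("stream identifier not greater than previously opened streams")
    }
    t.lastPeerID = id
    return nil
}

func main() {
    t := &streamIDTracker{peerIsClient: true}
    fmt.Println(t.checkNew(1)) // nil
    fmt.Println(t.checkNew(5)) // nil; idle stream 3 is implicitly closed
    fmt.Println(t.checkNew(3)) // error: not greater than 5
    fmt.Println(t.checkNew(6)) // error: even identifier from a client
}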
- -
- - A peer can limit the number of concurrently active streams using the - SETTINGS_MAX_CONCURRENT_STREAMS parameter (see ) within a SETTINGS frame. The maximum concurrent - streams setting is specific to each endpoint and applies only to the peer that receives - the setting. That is, clients specify the maximum number of concurrent streams the - server can initiate, and servers specify the maximum number of concurrent streams the - client can initiate. - - - Streams that are in the "open" state, or either of the "half closed" states count toward - the maximum number of streams that an endpoint is permitted to open. Streams in any of - these three states count toward the limit advertised in the - SETTINGS_MAX_CONCURRENT_STREAMS setting. Streams in either of the - "reserved" states do not count toward the stream limit. - - - Endpoints MUST NOT exceed the limit set by their peer. An endpoint that receives a - HEADERS frame that causes their advertised concurrent stream limit to be - exceeded MUST treat this as a stream error. An - endpoint that wishes to reduce the value of - SETTINGS_MAX_CONCURRENT_STREAMS to a value that is below the current - number of open streams can either close streams that exceed the new value or allow - streams to complete. - -
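The sketch below illustrates which states count against SETTINGS_MAX_CONCURRENT_STREAMS; the state enumeration and helper names are invented for the example.

package main

import "fmt"

// streamState is a simplified view of the stream states. Only "open" and the
// two "half closed" states count against SETTINGS_MAX_CONCURRENT_STREAMS;
// streams in either "reserved" state do not.
type streamState int

const (
    stateIdle streamState = iota
    stateReservedLocal
    stateReservedRemote
    stateOpen
    stateHalfClosedLocal
    stateHalfClosedRemote
    stateClosed
)

func countsTowardLimit(s streamState) bool {
    switch s {
    case stateOpen, stateHalfClosedLocal, stateHalfClosedRemote:
        return true
    }
    return false
}

// canOpen reports whether opening one more peer-initiated stream would
// exceed the advertised limit.
func canOpen(states map[uint32]streamState, limit uint32) bool {
    var active uint32
    for _, s := range states {
        if countsTowardLimit(s) {
            active++
        }
    }
    return active < limit
}

func main() {
    states := map[uint32]streamState{1: stateOpen, 3: stateHalfClosedRemote, 5: stateReservedRemote}
    fmt.Println(canOpen(states, 2)) // false: two active streams already
    fmt.Println(canOpen(states, 3)) // true
}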
-
- -
- - Using streams for multiplexing introduces contention over use of the TCP connection, - resulting in blocked streams. A flow control scheme ensures that streams on the same - connection do not destructively interfere with each other. Flow control is used for both - individual streams and for the connection as a whole. - - - HTTP/2 provides for flow control through use of the WINDOW_UPDATE frame. - - -
- - HTTP/2 stream flow control aims to allow a variety of flow control algorithms to be - used without requiring protocol changes. Flow control in HTTP/2 has the following - characteristics: - - - Flow control is specific to a connection; i.e., it is "hop-by-hop", not - "end-to-end". - - - Flow control is based on window update frames. Receivers advertise how many octets - they are prepared to receive on a stream and for the entire connection. This is a - credit-based scheme. - - - Flow control is directional with overall control provided by the receiver. A - receiver MAY choose to set any window size that it desires for each stream and for - the entire connection. A sender MUST respect flow control limits imposed by a - receiver. Clients, servers and intermediaries all independently advertise their - flow control window as a receiver and abide by the flow control limits set by - their peer when sending. - - - The initial value for the flow control window is 65,535 octets for both new streams - and the overall connection. - - - The frame type determines whether flow control applies to a frame. Of the frames - specified in this document, only DATA frames are subject to flow - control; all other frame types do not consume space in the advertised flow control - window. This ensures that important control frames are not blocked by flow control. - - - Flow control cannot be disabled. - - - HTTP/2 defines only the format and semantics of the WINDOW_UPDATE - frame (). This document does not stipulate how a - receiver decides when to send this frame or the value that it sends, nor does it - specify how a sender chooses to send packets. Implementations are able to select - any algorithm that suits their needs. - - - - - Implementations are also responsible for managing how requests and responses are sent - based on priority; choosing how to avoid head of line blocking for requests; and - managing the creation of new streams. Algorithm choices for these could interact with - any flow control algorithm. - -
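As a concrete reading of the credit-based scheme above, here is a minimal send-window sketch: DATA payloads consume credit and WINDOW_UPDATE increments restore it, starting from the 65,535-octet initial window. A real implementation tracks one such window per stream plus one for the connection.

package main

import (
    "errors"
    "fmt"
)

// flowWindow is a minimal credit-based send window.
type flowWindow struct {
    avail int32
}

// consume reserves n octets before sending a DATA frame of that size.
func (w *flowWindow) consume(n int32) error {
    if n > w.avail {
        return errors.New("would exceed the peer's advertised flow control window")
    }
    w.avail -= n
    return nil
}

// add applies a WINDOW_UPDATE increment received from the peer.
func (w *flowWindow) add(n int32) { w.avail += n }

func main() {
    w := &flowWindow{avail: 65535}
    fmt.Println(w.consume(16384), w.avail) // <nil> 49151
    fmt.Println(w.consume(65535))          // error: not enough credit yet
    w.add(32768)                           // peer grants more credit
    fmt.Println(w.consume(65535), w.avail) // <nil> 16384
}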
- -
- - Flow control is defined to protect endpoints that are operating under resource - constraints. For example, a proxy needs to share memory between many connections, and - also might have a slow upstream connection and a fast downstream one. Flow control - addresses cases where the receiver is unable process data on one stream, yet wants to - continue to process other streams in the same connection. - - - Deployments that do not require this capability can advertise a flow control window of - the maximum size, incrementing the available space when new data is received. This - effectively disables flow control for that receiver. Conversely, a sender is always - subject to the flow control window advertised by the receiver. - - - Deployments with constrained resources (for example, memory) can employ flow control to - limit the amount of memory a peer can consume. Note, however, that this can lead to - suboptimal use of available network resources if flow control is enabled without - knowledge of the bandwidth-delay product (see ). - - - Even with full awareness of the current bandwidth-delay product, implementation of flow - control can be difficult. When using flow control, the receiver MUST read from the TCP - receive buffer in a timely fashion. Failure to do so could lead to a deadlock when - critical frames, such as WINDOW_UPDATE, are not read and acted upon. - -
-
- -
- - A client can assign a priority for a new stream by including prioritization information in - the HEADERS frame that opens the stream. For an existing - stream, the PRIORITY frame can be used to change the - priority. - - - The purpose of prioritization is to allow an endpoint to express how it would prefer its - peer allocate resources when managing concurrent streams. Most importantly, priority can - be used to select streams for transmitting frames when there is limited capacity for - sending. - - - Streams can be prioritized by marking them as dependent on the completion of other streams - (). Each dependency is assigned a relative weight, a number - that is used to determine the relative proportion of available resources that are assigned - to streams dependent on the same stream. - - - - Explicitly setting the priority for a stream is input to a prioritization process. It - does not guarantee any particular processing or transmission order for the stream relative - to any other stream. An endpoint cannot force a peer to process concurrent streams in a - particular order using priority. Expressing priority is therefore only ever a suggestion. - - - Providing prioritization information is optional, so default values are used if no - explicit indicator is provided (). - - -
- - Each stream can be given an explicit dependency on another stream. Including a - dependency expresses a preference to allocate resources to the identified stream rather - than to the dependent stream. - - - A stream that is not dependent on any other stream is given a stream dependency of 0x0. - In other words, the non-existent stream 0 forms the root of the tree. - - - A stream that depends on another stream is a dependent stream. The stream upon which a - stream is dependent is a parent stream. A dependency on a stream that is not currently - in the tree - such as a stream in the "idle" state - results in that stream being given - a default priority. - - - When assigning a dependency on another stream, the stream is added as a new dependency - of the parent stream. Dependent streams that share the same parent are not ordered with - respect to each other. For example, if streams B and C are dependent on stream A, and - if stream D is created with a dependency on stream A, this results in a dependency order - of A followed by B, C, and D in any order. - -
- [diagram: stream A with dependents B and C; after D is added, A has dependents B, D, and C] -
- - An exclusive flag allows for the insertion of a new level of dependencies. The - exclusive flag causes the stream to become the sole dependency of its parent stream, - causing other dependencies to become dependent on the exclusive stream. In the - previous example, if stream D is created with an exclusive dependency on stream A, this - results in D becoming the dependency parent of B and C. - -
- [diagram: D becomes the sole dependent of A, with B and C now depending on D] -
- - Inside the dependency tree, a dependent stream SHOULD only be allocated resources if all - of the streams that it depends on (the chain of parent streams up to 0x0) are either - closed, or it is not possible to make progress on them. - - - A stream cannot depend on itself. An endpoint MUST treat this as a stream error of type PROTOCOL_ERROR. - -
- -
- - All dependent streams are allocated an integer weight between 1 and 256 (inclusive). - - - Streams with the same parent SHOULD be allocated resources proportionally based on their - weight. Thus, if stream B depends on stream A with weight 4, and C depends on stream A - with weight 12, and if no progress can be made on A, stream B ideally receives one third - of the resources allocated to stream C. - -
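A tiny sketch of the proportional-weight rule, reproducing the example above (B with weight 4 and C with weight 12 sharing a parent):

package main

import "fmt"

// share returns the fraction of the parent's resources each sibling would
// ideally receive, proportional to its weight (1..256).
func share(weights map[string]int) map[string]float64 {
    total := 0
    for _, w := range weights {
        total += w
    }
    out := make(map[string]float64, len(weights))
    for name, w := range weights {
        out[name] = float64(w) / float64(total)
    }
    return out
}

func main() {
    // B receives one third of what C receives.
    fmt.Println(share(map[string]int{"B": 4, "C": 12})) // map[B:0.25 C:0.75]
}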
- -
- - Stream priorities are changed using the PRIORITY frame. Setting a - dependency causes a stream to become dependent on the identified parent stream. - - - Dependent streams move with their parent stream if the parent is reprioritized. Setting - a dependency with the exclusive flag for a reprioritized stream moves all the - dependencies of the new parent stream to become dependent on the reprioritized stream. - - - If a stream is made dependent on one of its own dependencies, the formerly dependent - stream is first moved to be dependent on the reprioritized stream's previous parent. - The moved dependency retains its weight. - -
- - For example, consider an original dependency tree where B and C depend on A, D and E depend on C, and F depends on D. If A is made dependent on D, then D takes the place of A. All other dependency relationships stay the same, except for F, which becomes dependent on A if the reprioritization is exclusive. - [dependency tree diagrams showing the intermediate, non-exclusive, and exclusive outcomes] -
-
- -
- - When a stream is removed from the dependency tree, its dependencies can be moved to - become dependent on the parent of the closed stream. The weights of new dependencies - are recalculated by distributing the weight of the dependency of the closed stream - proportionally based on the weights of its dependencies. - - - Streams that are removed from the dependency tree cause some prioritization information - to be lost. Resources are shared between streams with the same parent stream, which - means that if a stream in that set closes or becomes blocked, any spare capacity - allocated to a stream is distributed to the immediate neighbors of the stream. However, - if the common dependency is removed from the tree, those streams share resources with - streams at the next highest level. - - - For example, assume streams A and B share a parent, and streams C and D both depend on - stream A. Prior to the removal of stream A, if streams A and D are unable to proceed, - then stream C receives all the resources dedicated to stream A. If stream A is removed - from the tree, the weight of stream A is divided between streams C and D. If stream D - is still unable to proceed, this results in stream C receiving a reduced proportion of - resources. For equal starting weights, C receives one third, rather than one half, of - available resources. - - - It is possible for a stream to become closed while prioritization information that - creates a dependency on that stream is in transit. If a stream identified in a - dependency has no associated priority information, then the dependent stream is instead - assigned a default priority. This potentially creates - suboptimal prioritization, since the stream could be given a priority that is different - to what is intended. - - - To avoid these problems, an endpoint SHOULD retain stream prioritization state for a - period after streams become closed. The longer state is retained, the lower the chance - that streams are assigned incorrect or default priority values. - - - This could create a large state burden for an endpoint, so this state MAY be limited. - An endpoint MAY apply a fixed upper limit on the number of closed streams for which - prioritization state is tracked to limit state exposure. The amount of additional state - an endpoint maintains could be dependent on load; under high load, prioritization state - can be discarded to limit resource commitments. In extreme cases, an endpoint could - even discard prioritization state for active or reserved streams. If a fixed limit is - applied, endpoints SHOULD maintain state for at least as many streams as allowed by - their setting for SETTINGS_MAX_CONCURRENT_STREAMS. - - - An endpoint receiving a PRIORITY frame that changes the priority of a - closed stream SHOULD alter the dependencies of the streams that depend on it, if it has - retained enough state to do so. - -
- -
- - Providing priority information is optional. Streams are assigned a non-exclusive - dependency on stream 0x0 by default. Pushed streams - initially depend on their associated stream. In both cases, streams are assigned a - default weight of 16. - -
-
- -
- - HTTP/2 framing permits two classes of error: - - - An error condition that renders the entire connection unusable is a connection error. - - - An error in an individual stream is a stream error. - - - - - A list of error codes is included in . - - -
- - A connection error is any error which prevents further processing of the framing layer, - or which corrupts any connection state. - - - An endpoint that encounters a connection error SHOULD first send a GOAWAY - frame () with the stream identifier of the last stream that it - successfully received from its peer. The GOAWAY frame includes an error - code that indicates why the connection is terminating. After sending the - GOAWAY frame, the endpoint MUST close the TCP connection. - - - It is possible that the GOAWAY will not be reliably received by the - receiving endpoint (see ). In the event of a connection error, - GOAWAY only provides a best effort attempt to communicate with the peer - about why the connection is being terminated. - - - An endpoint can end a connection at any time. In particular, an endpoint MAY choose to - treat a stream error as a connection error. Endpoints SHOULD send a - GOAWAY frame when ending a connection, providing that circumstances - permit it. - -
- -
- - A stream error is an error related to a specific stream that does not affect processing - of other streams. - - - An endpoint that detects a stream error sends a RST_STREAM frame () that contains the stream identifier of the stream where the error - occurred. The RST_STREAM frame includes an error code that indicates the - type of error. - - - A RST_STREAM is the last frame that an endpoint can send on a stream. - The peer that sends the RST_STREAM frame MUST be prepared to receive any - frames that were sent or enqueued for sending by the remote peer. These frames can be - ignored, except where they modify connection state (such as the state maintained for - header compression, or flow control). - - - Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for - any stream. However, an endpoint MAY send additional RST_STREAM frames if - it receives frames on a closed stream after more than a round-trip time. This behavior - is permitted to deal with misbehaving implementations. - - - An endpoint MUST NOT send a RST_STREAM in response to an - RST_STREAM frame, to avoid looping. - -
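For illustration, the sketch below serializes the two error signals described above with the upstream golang.org/x/net/http2 Framer (this package vendors an equivalent API): RST_STREAM for a stream error, and GOAWAY, followed by closing the connection, for a connection error. The error codes and debug payload are arbitrary.

package main

import (
    "bytes"
    "fmt"

    "golang.org/x/net/http2"
)

func main() {
    var buf bytes.Buffer
    fr := http2.NewFramer(&buf, nil) // write-only framer for illustration

    // Stream error: reset just the affected stream; the connection stays up.
    fr.WriteRSTStream(1, http2.ErrCodeProtocol)

    // Connection error: send GOAWAY with the last stream successfully
    // processed, then the endpoint closes the TCP connection.
    fr.WriteGoAway(1, http2.ErrCodeProtocol, []byte("debug data"))

    fmt.Printf("serialized %d bytes of RST_STREAM and GOAWAY frames\n", buf.Len())
}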
- -
- - If the TCP connection is closed or reset while streams remain in open or half closed - states, then the endpoint MUST assume that those streams were abnormally interrupted and - could be incomplete. - -
-
- -
- - HTTP/2 permits extension of the protocol. Protocol extensions can be used to provide - additional services or alter any aspect of the protocol, within the limitations described - in this section. Extensions are effective only within the scope of a single HTTP/2 - connection. - - - Extensions are permitted to use new frame types, new - settings, or new error - codes. Registries are established for managing these extension points: frame types, settings and - error codes. - - - Implementations MUST ignore unknown or unsupported values in all extensible protocol - elements. Implementations MUST discard frames that have unknown or unsupported types. - This means that any of these extension points can be safely used by extensions without - prior arrangement or negotiation. However, extension frames that appear in the middle of - a header block are not permitted; these MUST be treated - as a connection error of type - PROTOCOL_ERROR. - - - However, extensions that could change the semantics of existing protocol components MUST - be negotiated before being used. For example, an extension that changes the layout of the - HEADERS frame cannot be used until the peer has given a positive signal - that this is acceptable. In this case, it could also be necessary to coordinate when the - revised layout comes into effect. Note that treating any frame other than - DATA frames as flow controlled is such a change in semantics, and can only - be done through negotiation. - - - This document doesn't mandate a specific method for negotiating the use of an extension, - but notes that a setting could be used for that - purpose. If both peers set a value that indicates willingness to use the extension, then - the extension can be used. If a setting is used for extension negotiation, the initial - value MUST be defined so that the extension is initially disabled. - -
-
- -
- - This specification defines a number of frame types, each identified by a unique 8-bit type code. Each frame type serves a distinct purpose either in the establishment and management of the connection as a whole, or of individual streams. - - The transmission of specific frame types can alter the state of a connection. If endpoints fail to maintain a synchronized view of the connection state, successful communication within the connection will no longer be possible. Therefore, it is important that endpoints have a shared comprehension of how the state is affected by the use of any given frame. -
- - DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated - with a stream. One or more DATA frames are used, for instance, to carry HTTP request or - response payloads. - - - DATA frames MAY also contain arbitrary padding. Padding can be added to DATA frames to - obscure the size of messages. - -
- -
- - The DATA frame contains the following fields: - - Pad Length: An 8-bit field containing the length of the frame padding in units of octets. This field is optional and is only present if the PADDED flag is set. - - Data: Application data. The amount of data is the remainder of the frame payload after subtracting the length of the other fields that are present. - - Padding: Padding octets that contain no application semantic value. Padding octets MUST be set to zero when sending and ignored when receiving. - - - The DATA frame defines the following flags: - - END_STREAM: Bit 1 being set indicates that this frame is the last that the endpoint will send for the identified stream. Setting this flag causes the stream to enter one of the "half closed" states or the "closed" state. - - PADDED: Bit 4 being set indicates that the Pad Length field and any padding that it describes are present. - - - DATA frames MUST be associated with a stream. If a DATA frame is received whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. - - DATA frames are subject to flow control and can only be sent when a stream is in the "open" or "half closed (remote)" states. The entire DATA frame payload is included in flow control, including the Pad Length and Padding fields if present. If a DATA frame is received whose stream is not in the "open" or "half closed (local)" state, the recipient MUST respond with a stream error of type STREAM_CLOSED. - - The total number of padding octets is determined by the value of the Pad Length field. If the length of the padding is greater than the length of the frame payload, the recipient MUST treat this as a connection error of type PROTOCOL_ERROR. - - A frame can be increased in size by one octet by including a Pad Length field with a value of zero. - - Padding is a security feature; see the Security Considerations section. -
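As a sketch of how this maps onto the Framer in this package, the helper below writes a body chunk either as a plain DATA frame or, when the caller wants to obscure its size, as a padded one; the helper name, the padTo parameter, and the 255-octet cap (the Pad Length field is 8 bits) are assumptions for the example.

    package http2

    // writeBody sends application data on a stream, optionally padding the frame
    // up to padTo octets to obscure the message size. The whole payload, including
    // the Pad Length and Padding fields, counts against flow control.
    func writeBody(fr *Framer, streamID uint32, data []byte, endStream bool, padTo int) error {
        if pad := padTo - len(data); pad > 0 && pad <= 255 {
            // Padding octets are sent as zero and ignored by the receiver.
            return fr.WriteDataPadded(streamID, endStream, data, make([]byte, pad))
        }
        return fr.WriteData(streamID, endStream, data)
    }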
- -
- - The HEADERS frame (type=0x1) is used to open a stream, - and additionally carries a header block fragment. HEADERS frames can be sent on a stream - in the "open" or "half closed (remote)" states. - -
- -
- - The HEADERS frame payload has the following fields: - - Pad Length: An 8-bit field containing the length of the frame padding in units of octets. This field is only present if the PADDED flag is set. - - E: A single bit flag that indicates that the stream dependency is exclusive (see the Stream Priority section). This field is only present if the PRIORITY flag is set. - - Stream Dependency: A 31-bit stream identifier for the stream that this stream depends on (see the Stream Priority section). This field is only present if the PRIORITY flag is set. - - Weight: An 8-bit weight for the stream (see the Stream Priority section). Add one to the value to obtain a weight between 1 and 256. This field is only present if the PRIORITY flag is set. - - Header Block Fragment: A header block fragment. - - Padding: Padding octets that contain no application semantic value. Padding octets MUST be set to zero when sending and ignored when receiving. - - - The HEADERS frame defines the following flags: - - END_STREAM: Bit 1 being set indicates that the header block is the last that the endpoint will send for the identified stream. Setting this flag causes the stream to enter one of the "half closed" states. - - A HEADERS frame carries the END_STREAM flag that signals the end of a stream. However, a HEADERS frame with the END_STREAM flag set can be followed by CONTINUATION frames on the same stream. Logically, the CONTINUATION frames are part of the HEADERS frame. - - END_HEADERS: Bit 3 being set indicates that this frame contains an entire header block and is not followed by any CONTINUATION frames. - - A HEADERS frame without the END_HEADERS flag set MUST be followed by a CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any other type of frame or a frame on a different stream as a connection error of type PROTOCOL_ERROR. - - PADDED: Bit 4 being set indicates that the Pad Length field and any padding that it describes are present. - - PRIORITY: Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight fields are present; see the Stream Priority section. - - - The payload of a HEADERS frame contains a header block fragment. A header block that does not fit within a HEADERS frame is continued in a CONTINUATION frame. - - HEADERS frames MUST be associated with a stream. If a HEADERS frame is received whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. - - The HEADERS frame changes the connection state as described in the section on header compression and decompression. - - The HEADERS frame includes optional padding. Padding fields and flags are identical to those defined for DATA frames. - - Prioritization information in a HEADERS frame is logically equivalent to a separate PRIORITY frame, but inclusion in HEADERS avoids the potential for churn in stream prioritization when new streams are created. Prioritization fields in HEADERS frames subsequent to the first on a stream reprioritize the stream. -
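A rough sketch of emitting such a header block with the hpack encoder and the Framer used elsewhere in this package; the field values are placeholders, and the example assumes the encoded block fits in a single frame (see the CONTINUATION sketch later for the oversized case).

    package http2

    import (
        "bytes"

        "golang.org/x/net/http2/hpack"
    )

    // writeRequestHeaders encodes a minimal set of pseudo-header fields and sends
    // them in one HEADERS frame that also ends the stream (a request with no body).
    func writeRequestHeaders(fr *Framer, streamID uint32) error {
        var buf bytes.Buffer
        enc := hpack.NewEncoder(&buf)
        for _, f := range []hpack.HeaderField{
            {Name: ":method", Value: "GET"},
            {Name: ":scheme", Value: "https"},
            {Name: ":path", Value: "/resource"},
            {Name: ":authority", Value: "example.org"},
        } {
            if err := enc.WriteField(f); err != nil {
                return err
            }
        }
        return fr.WriteHeaders(HeadersFrameParam{
            StreamID:      streamID,
            BlockFragment: buf.Bytes(),
            EndStream:     true, // no request body follows
            EndHeaders:    true, // the entire header block fits in this frame
        })
    }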
- -
- - The PRIORITY frame (type=0x2) specifies the sender-advised - priority of a stream. It can be sent at any time for an existing stream, including - closed streams. This enables reprioritization of existing streams. - -
- -
- - The payload of a PRIORITY frame contains the following fields: - - E: A single bit flag that indicates that the stream dependency is exclusive (see the Stream Priority section). - - Stream Dependency: A 31-bit stream identifier for the stream that this stream depends on (see the Stream Priority section). - - Weight: An 8-bit weight for the identified stream dependency (see the Stream Priority section). Add one to the value to obtain a weight between 1 and 256. - - - The PRIORITY frame does not define any flags. - - The PRIORITY frame is associated with an existing stream. If a PRIORITY frame is received with a stream identifier of 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. - - The PRIORITY frame can be sent on a stream in any of the "reserved (remote)", "open", "half closed (local)", "half closed (remote)", or "closed" states, though it cannot be sent between consecutive frames that comprise a single header block. Note that this frame could arrive after processing or frame sending has completed, which would cause it to have no effect on the current stream. For a stream that is in the "half closed (remote)" or "closed" state, this frame can only affect processing of the current stream and not frame transmission. - - The PRIORITY frame is the only frame that can be sent for a stream in the "closed" state. This allows for the reprioritization of a group of dependent streams by altering the priority of a parent stream, which might be closed. However, a PRIORITY frame sent on a closed stream risks being ignored due to the peer having discarded priority state information for that stream. -
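A minimal sketch of reprioritizing an existing stream through this package's Framer; the particular dependency, exclusivity, and weight are arbitrary assumptions.

    package http2

    // reprioritize makes streamID depend exclusively on parentID with an
    // effective weight of 16 (the wire value is one less than the weight).
    func reprioritize(fr *Framer, streamID, parentID uint32) error {
        return fr.WritePriority(streamID, PriorityParam{
            StreamDep: parentID,
            Exclusive: true,
            Weight:    15, // 15 on the wire means a weight of 16
        })
    }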
- -
- - The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream. When sent by - the initiator of a stream, it indicates that they wish to cancel the stream or that an - error condition has occurred. When sent by the receiver of a stream, it indicates that - either the receiver is rejecting the stream, requesting that the stream be cancelled, or - that an error condition has occurred. - -
- -
- - - The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error code. The error code indicates why the stream is being - terminated. - - - - The RST_STREAM frame does not define any flags. - - - - The RST_STREAM frame fully terminates the referenced stream and causes it to enter the - closed state. After receiving a RST_STREAM on a stream, the receiver MUST NOT send - additional frames for that stream, with the exception of PRIORITY. However, - after sending the RST_STREAM, the sending endpoint MUST be prepared to receive and process - additional frames sent on the stream that might have been sent by the peer prior to the - arrival of the RST_STREAM. - - - - RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame is received - with a stream identifier of 0x0, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - - - RST_STREAM frames MUST NOT be sent for a stream in the "idle" state. If a RST_STREAM - frame identifying an idle stream is received, the recipient MUST treat this as a connection error of type - PROTOCOL_ERROR. - - -
- -
- - The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints - communicate, such as preferences and constraints on peer behavior. The SETTINGS frame is - also used to acknowledge the receipt of those parameters. Individually, a SETTINGS - parameter can also be referred to as a "setting". - - - SETTINGS parameters are not negotiated; they describe characteristics of the sending peer, - which are used by the receiving peer. Different values for the same parameter can be - advertised by each peer. For example, a client might set a high initial flow control - window, whereas a server might set a lower value to conserve resources. - - - - A SETTINGS frame MUST be sent by both endpoints at the start of a connection, and MAY be - sent at any other time by either endpoint over the lifetime of the connection. - Implementations MUST support all of the parameters defined by this specification. - - - - Each parameter in a SETTINGS frame replaces any existing value for that parameter. - Parameters are processed in the order in which they appear, and a receiver of a SETTINGS - frame does not need to maintain any state other than the current value of its - parameters. Therefore, the value of a SETTINGS parameter is the last value that is seen by - a receiver. - - - SETTINGS parameters are acknowledged by the receiving peer. To enable this, the SETTINGS - frame defines the following flag: - - - Bit 1 being set indicates that this frame acknowledges receipt and application of the - peer's SETTINGS frame. When this bit is set, the payload of the SETTINGS frame MUST - be empty. Receipt of a SETTINGS frame with the ACK flag set and a length field value - other than 0 MUST be treated as a connection - error of type FRAME_SIZE_ERROR. For more info, see Settings Synchronization. - - - - - SETTINGS frames always apply to a connection, never a single stream. The stream - identifier for a SETTINGS frame MUST be zero (0x0). If an endpoint receives a SETTINGS - frame whose stream identifier field is anything other than 0x0, the endpoint MUST respond - with a connection error of type - PROTOCOL_ERROR. - - - The SETTINGS frame affects connection state. A badly formed or incomplete SETTINGS frame - MUST be treated as a connection error of type - PROTOCOL_ERROR. - - -
- - The payload of a SETTINGS frame consists of zero or more parameters, each consisting of - an unsigned 16-bit setting identifier and an unsigned 32-bit value. - - -
- -
-
- -
- - The following parameters are defined: - - SETTINGS_HEADER_TABLE_SIZE (0x1): Allows the sender to inform the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. The encoder can select any size equal to or less than this value by using signaling specific to the header compression format inside a header block. The initial value is 4,096 octets. - - SETTINGS_ENABLE_PUSH (0x2): This setting can be used to disable server push. An endpoint MUST NOT send a PUSH_PROMISE frame if it receives this parameter set to a value of 0. An endpoint that has both set this parameter to 0 and had it acknowledged MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR. - - The initial value is 1, which indicates that server push is permitted. Any value other than 0 or 1 MUST be treated as a connection error of type PROTOCOL_ERROR. - - SETTINGS_MAX_CONCURRENT_STREAMS (0x3): Indicates the maximum number of concurrent streams that the sender will allow. This limit is directional: it applies to the number of streams that the sender permits the receiver to create. Initially there is no limit to this value. It is recommended that this value be no smaller than 100, so as to not unnecessarily limit parallelism. - - A value of 0 for SETTINGS_MAX_CONCURRENT_STREAMS SHOULD NOT be treated as special by endpoints. A zero value does prevent the creation of new streams; however, this can also happen for any limit that is exhausted with active streams. Servers SHOULD only set a zero value for short durations; if a server does not wish to accept requests, closing the connection could be preferable. - - SETTINGS_INITIAL_WINDOW_SIZE (0x4): Indicates the sender's initial window size (in octets) for stream level flow control. The initial value is 2^16-1 (65,535) octets. - - This setting affects the window size of all streams, including existing streams; see the section on initial flow control window size. - - Values above the maximum flow control window size of 2^31-1 MUST be treated as a connection error of type FLOW_CONTROL_ERROR. - - SETTINGS_MAX_FRAME_SIZE (0x5): Indicates the size of the largest frame payload that the sender is willing to receive, in octets. - - The initial value is 2^14 (16,384) octets. The value advertised by an endpoint MUST be between this initial value and the maximum allowed frame size (2^24-1 or 16,777,215 octets), inclusive. Values outside this range MUST be treated as a connection error of type PROTOCOL_ERROR. - - SETTINGS_MAX_HEADER_LIST_SIZE (0x6): This advisory setting informs a peer of the maximum size of header list that the sender is prepared to accept, in octets. The value is based on the uncompressed size of header fields, including the length of the name and value in octets plus an overhead of 32 octets for each header field. - - For any given request, a lower limit than what is advertised MAY be enforced. The initial value of this setting is unlimited. - - - An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier MUST ignore that setting. -
- -
- - Most values in SETTINGS benefit from or require an understanding of when the peer has - received and applied the changed parameter values. In order to provide - such synchronization timepoints, the recipient of a SETTINGS frame in which the ACK flag - is not set MUST apply the updated parameters as soon as possible upon receipt. - - - The values in the SETTINGS frame MUST be processed in the order they appear, with no - other frame processing between values. Unsupported parameters MUST be ignored. Once - all values have been processed, the recipient MUST immediately emit a SETTINGS frame - with the ACK flag set. Upon receiving a SETTINGS frame with the ACK flag set, the sender - of the altered parameters can rely on the setting having been applied. - - - If the sender of a SETTINGS frame does not receive an acknowledgement within a - reasonable amount of time, it MAY issue a connection error of type - SETTINGS_TIMEOUT. - -
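The exchange described above might look roughly like the following with this package's Framer; the advertised values and the apply callback are assumptions, and error handling is reduced to the bare minimum.

    package http2

    // advertiseSettings announces this endpoint's preferences. The peer applies
    // the values in order and then acknowledges with an empty SETTINGS frame
    // carrying the ACK flag.
    func advertiseSettings(fr *Framer) error {
        return fr.WriteSettings(
            Setting{ID: SettingMaxConcurrentStreams, Val: 100},
            Setting{ID: SettingInitialWindowSize, Val: 1 << 20},
        )
    }

    // onSettings handles a peer's SETTINGS frame: apply each parameter in the
    // order received, then acknowledge immediately.
    func onSettings(fr *Framer, sf *SettingsFrame, apply func(Setting) error) error {
        if sf.IsAck() {
            return nil // the peer has applied the settings we sent earlier
        }
        if err := sf.ForeachSetting(apply); err != nil {
            return err
        }
        return fr.WriteSettingsAck()
    }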
-
- -
- - The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of streams the sender intends to initiate. The PUSH_PROMISE frame includes the unsigned 31-bit identifier of the stream the endpoint plans to create along with a set of headers that provide additional context for the stream. The Server Push section contains a thorough description of the use of PUSH_PROMISE frames. -
- -
- - The PUSH_PROMISE frame payload has the following fields: - - - An 8-bit field containing the length of the frame padding in units of octets. This - field is only present if the PADDED flag is set. - - - A single reserved bit. - - - An unsigned 31-bit integer that identifies the stream that is reserved by the - PUSH_PROMISE. The promised stream identifier MUST be a valid choice for the next - stream sent by the sender (see new stream - identifier). - - - A header block fragment containing request header - fields. - - - Padding octets. - - - - - - The PUSH_PROMISE frame defines the following flags: - - - - Bit 3 being set indicates that this frame contains an entire header block and is not followed by any - CONTINUATION frames. - - - A PUSH_PROMISE frame without the END_HEADERS flag set MUST be followed by a - CONTINUATION frame for the same stream. A receiver MUST treat the receipt of any - other type of frame or a frame on a different stream as a connection error of type - PROTOCOL_ERROR. - - - - - Bit 4 being set indicates that the Pad Length field and any padding that it - describes is present. - - - - - - - PUSH_PROMISE frames MUST be associated with an existing, peer-initiated stream. The stream - identifier of a PUSH_PROMISE frame indicates the stream it is associated with. If the - stream identifier field specifies the value 0x0, a recipient MUST respond with a connection error of type - PROTOCOL_ERROR. - - - - Promised streams are not required to be used in the order they are promised. The - PUSH_PROMISE only reserves stream identifiers for later use. - - - - PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH setting of the - peer endpoint is set to 0. An endpoint that has set this setting and has received - acknowledgement MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type - PROTOCOL_ERROR. - - - Recipients of PUSH_PROMISE frames can choose to reject promised streams by returning a - RST_STREAM referencing the promised stream identifier back to the sender of - the PUSH_PROMISE. - - - - A PUSH_PROMISE frame modifies the connection state in two ways. The inclusion of a header block potentially modifies the state maintained for - header compression. PUSH_PROMISE also reserves a stream for later use, causing the - promised stream to enter the "reserved" state. A sender MUST NOT send a PUSH_PROMISE on a - stream unless that stream is either "open" or "half closed (remote)"; the sender MUST - ensure that the promised stream is a valid choice for a new stream identifier (that is, the promised stream MUST - be in the "idle" state). - - - Since PUSH_PROMISE reserves a stream, ignoring a PUSH_PROMISE frame causes the stream - state to become indeterminate. A receiver MUST treat the receipt of a PUSH_PROMISE on a - stream that is neither "open" nor "half closed (local)" as a connection error of type - PROTOCOL_ERROR. However, an endpoint that has sent - RST_STREAM on the associated stream MUST handle PUSH_PROMISE frames that - might have been created before the RST_STREAM frame is received and - processed. - - - A receiver MUST treat the receipt of a PUSH_PROMISE that promises an illegal stream identifier (that is, an identifier for a - stream that is not currently in the "idle" state) as a connection error of type - PROTOCOL_ERROR. - - - - The PUSH_PROMISE frame includes optional padding. Padding fields and flags are identical - to those defined for DATA frames. - -
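As a sketch, a server built on this package's Framer could reserve a stream as shown below; the promised stream identifier (which must be an even, previously unused server-side identifier), the header field values, and the helper name are assumptions.

    package http2

    import (
        "bytes"

        "golang.org/x/net/http2/hpack"
    )

    // promisePush reserves promisedID by sending PUSH_PROMISE on the
    // client-initiated stream associatedID. The promised request must be safe,
    // cacheable, and carry no request body.
    func promisePush(fr *Framer, associatedID, promisedID uint32) error {
        var buf bytes.Buffer
        enc := hpack.NewEncoder(&buf)
        for _, f := range []hpack.HeaderField{
            {Name: ":method", Value: "GET"},
            {Name: ":scheme", Value: "https"},
            {Name: ":path", Value: "/style.css"},
            {Name: ":authority", Value: "example.org"},
        } {
            if err := enc.WriteField(f); err != nil {
                return err
            }
        }
        return fr.WritePushPromise(PushPromiseParam{
            StreamID:      associatedID,
            PromiseID:     promisedID,
            BlockFragment: buf.Bytes(),
            EndHeaders:    true,
        })
    }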
- -
- - The PING frame (type=0x6) is a mechanism for measuring a minimal round trip time from the - sender, as well as determining whether an idle connection is still functional. PING - frames can be sent from any endpoint. - -
- -
- - - In addition to the frame header, PING frames MUST contain 8 octets of data in the payload. - A sender can include any value it chooses and use those bytes in any fashion. - - - Receivers of a PING frame that does not include an ACK flag MUST send a PING frame with - the ACK flag set in response, with an identical payload. PING responses SHOULD be given - higher priority than any other frame. - - - - The PING frame defines the following flags: - - - Bit 1 being set indicates that this PING frame is a PING response. An endpoint MUST - set this flag in PING responses. An endpoint MUST NOT respond to PING frames - containing this flag. - - - - - PING frames are not associated with any individual stream. If a PING frame is received - with a stream identifier field value other than 0x0, the recipient MUST respond with a - connection error of type - PROTOCOL_ERROR. - - - Receipt of a PING frame with a length field value other than 8 MUST be treated as a connection error of type - FRAME_SIZE_ERROR. - - -
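The echo behaviour above amounts to a few lines with this package's frame types; the surrounding read loop is assumed.

    package http2

    // onPing answers a peer's PING. A PING that is itself an acknowledgement is
    // never answered; otherwise the 8-octet opaque payload is echoed back with
    // the ACK flag set.
    func onPing(fr *Framer, pf *PingFrame) error {
        if pf.IsAck() {
            return nil // the peer's reply to one of our own PINGs
        }
        return fr.WritePing(true, pf.Data)
    }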
- -
- - The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this - connection. GOAWAY can be sent by either the client or the server. Once sent, the sender - will ignore frames sent on any new streams with identifiers higher than the included last - stream identifier. Receivers of a GOAWAY frame MUST NOT open additional streams on the - connection, although a new connection can be established for new streams. - - - The purpose of this frame is to allow an endpoint to gracefully stop accepting new - streams, while still finishing processing of previously established streams. This enables - administrative actions, like server maintenance. - - - There is an inherent race condition between an endpoint starting new streams and the - remote sending a GOAWAY frame. To deal with this case, the GOAWAY contains the stream - identifier of the last peer-initiated stream which was or might be processed on the - sending endpoint in this connection. For instance, if the server sends a GOAWAY frame, - the identified stream is the highest numbered stream initiated by the client. - - - If the receiver of the GOAWAY has sent data on streams with a higher stream identifier - than what is indicated in the GOAWAY frame, those streams are not or will not be - processed. The receiver of the GOAWAY frame can treat the streams as though they had - never been created at all, thereby allowing those streams to be retried later on a new - connection. - - - Endpoints SHOULD always send a GOAWAY frame before closing a connection so that the remote - can know whether a stream has been partially processed or not. For example, if an HTTP - client sends a POST at the same time that a server closes a connection, the client cannot - know if the server started to process that POST request if the server does not send a - GOAWAY frame to indicate what streams it might have acted on. - - - An endpoint might choose to close a connection without sending GOAWAY for misbehaving - peers. - - -
- -
- - The GOAWAY frame does not define any flags. - - The GOAWAY frame applies to the connection, not a specific stream. An endpoint MUST treat a GOAWAY frame with a stream identifier other than 0x0 as a connection error of type PROTOCOL_ERROR. - - The last stream identifier in the GOAWAY frame contains the highest numbered stream identifier for which the sender of the GOAWAY frame might have taken some action on, or might yet take action on. All streams up to and including the identified stream might have been processed in some way. The last stream identifier can be set to 0 if no streams were processed. - - In this context, "processed" means that some data from the stream was passed to some higher layer of software that might have taken some action as a result. - - If a connection terminates without a GOAWAY frame, the last stream identifier is effectively the highest possible stream identifier. - - On streams with lower or equal numbered identifiers that were not closed completely prior to the connection being closed, re-attempting requests, transactions, or any protocol activity is not possible, with the exception of idempotent actions like HTTP GET, PUT, or DELETE. Any protocol activity that uses higher numbered streams can be safely retried using a new connection. - - Activity on streams numbered lower or equal to the last stream identifier might still complete successfully. The sender of a GOAWAY frame might gracefully shut down a connection by sending a GOAWAY frame, maintaining the connection in an open state until all in-progress streams complete. - - An endpoint MAY send multiple GOAWAY frames if circumstances change. For instance, an endpoint that sends GOAWAY with NO_ERROR during graceful shutdown could subsequently encounter a condition that requires immediate termination of the connection. The last stream identifier from the last GOAWAY frame received indicates which streams could have been acted upon. Endpoints MUST NOT increase the value they send in the last stream identifier, since the peers might already have retried unprocessed requests on another connection. - - A client that is unable to retry requests loses all requests that are in flight when the server closes the connection. This is especially true for intermediaries that might not be serving clients using HTTP/2. A server that is attempting to gracefully shut down a connection SHOULD send an initial GOAWAY frame with the last stream identifier set to 2^31-1 and a NO_ERROR code. This signals to the client that a shutdown is imminent and that no further requests can be initiated. After waiting at least one round trip time, the server can send another GOAWAY frame with an updated last stream identifier. This ensures that a connection can be cleanly shut down without losing requests. - - After sending a GOAWAY frame, the sender can discard frames for streams with identifiers higher than the identified last stream. However, any frames that alter connection state cannot be completely ignored. For instance, HEADERS, PUSH_PROMISE and CONTINUATION frames MUST be minimally processed to ensure the state maintained for header compression is consistent (see the section on header compression and decompression); similarly DATA frames MUST be counted toward the connection flow control window. Failure to process these frames can cause flow control or header compression state to become unsynchronized.
- - - - The GOAWAY frame also contains a 32-bit error code that - contains the reason for closing the connection. - - - Endpoints MAY append opaque data to the payload of any GOAWAY frame. Additional debug - data is intended for diagnostic purposes only and carries no semantic value. Debug - information could contain security- or privacy-sensitive data. Logged or otherwise - persistently stored debug data MUST have adequate safeguards to prevent unauthorized - access. - -
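The two-step graceful shutdown recommended above could be sketched like this with this package's Framer; the one-second pause standing in for "at least one round-trip time" and the lastProcessed callback are assumptions.

    package http2

    import "time"

    // shutDownGracefully warns the client that shutdown is imminent, waits so
    // that in-flight requests can be observed, then announces the true last
    // stream that was or may yet be processed.
    func shutDownGracefully(fr *Framer, lastProcessed func() uint32) error {
        // Step 1: a last stream ID of 2^31-1 tells the peer to stop creating
        // streams without declaring any particular stream unprocessed.
        if err := fr.WriteGoAway(1<<31-1, ErrCodeNo, nil); err != nil {
            return err
        }
        time.Sleep(1 * time.Second) // stand-in for at least one round-trip time
        // Step 2: the definitive GOAWAY; this identifier must never increase
        // in any later GOAWAY frame.
        return fr.WriteGoAway(lastProcessed(), ErrCodeNo, nil)
    }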
- -
- - The WINDOW_UPDATE frame (type=0x8) is used to implement flow control; see the Flow Control section for an overview. - - Flow control operates at two levels: on each individual stream and on the entire connection. - - Both types of flow control are hop-by-hop; that is, only between the two endpoints. Intermediaries do not forward WINDOW_UPDATE frames between dependent connections. However, throttling of data transfer by any receiver can indirectly cause the propagation of flow control information toward the original sender. - - Flow control only applies to frames that are identified as being subject to flow control. Of the frame types defined in this document, this includes only DATA frames. Frames that are exempt from flow control MUST be accepted and processed, unless the receiver is unable to assign resources to handling the frame. A receiver MAY respond with a stream error or connection error of type FLOW_CONTROL_ERROR if it is unable to accept a frame. -
- -
- - The payload of a WINDOW_UPDATE frame is one reserved bit, plus an unsigned 31-bit integer indicating the number of octets that the sender can transmit in addition to the existing flow control window. The legal range for the increment to the flow control window is 1 to 2^31-1 (0x7fffffff) octets. - - The WINDOW_UPDATE frame does not define any flags. - - The WINDOW_UPDATE frame can be specific to a stream or to the entire connection. In the former case, the frame's stream identifier indicates the affected stream; in the latter, the value "0" indicates that the entire connection is the subject of the frame. - - A receiver MUST treat the receipt of a WINDOW_UPDATE frame with a flow control window increment of 0 as a stream error of type PROTOCOL_ERROR; errors on the connection flow control window MUST be treated as a connection error. - - WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag. This means that a receiver could receive a WINDOW_UPDATE frame on a "half closed (remote)" or "closed" stream. A receiver MUST NOT treat this as an error; see the Stream States section. - - A receiver that receives a flow controlled frame MUST always account for its contribution against the connection flow control window, unless the receiver treats this as a connection error. This is necessary even if the frame is in error. Since the sender counts the frame toward the flow control window, if the receiver does not, the flow control window at sender and receiver can become different. -
- - Flow control in HTTP/2 is implemented using a window kept by each sender on every stream. The flow control window is a simple integer value that indicates how many octets of data the sender is permitted to transmit; as such, its size is a measure of the buffering capacity of the receiver. - - Two flow control windows are applicable: the stream flow control window and the connection flow control window. The sender MUST NOT send a flow controlled frame with a length that exceeds the space available in either of the flow control windows advertised by the receiver. Frames with zero length with the END_STREAM flag set (that is, an empty DATA frame) MAY be sent if there is no available space in either flow control window. - - For flow control calculations, the 9-octet frame header is not counted. - - After sending a flow controlled frame, the sender reduces the space available in both windows by the length of the transmitted frame. - - The receiver of a frame sends a WINDOW_UPDATE frame as it consumes data and frees up space in flow control windows. Separate WINDOW_UPDATE frames are sent for the stream and connection level flow control windows. - - A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the amount specified in the frame. - - A sender MUST NOT allow a flow control window to exceed 2^31-1 octets. If a sender receives a WINDOW_UPDATE that causes a flow control window to exceed this maximum it MUST terminate either the stream or the connection, as appropriate. For streams, the sender sends a RST_STREAM with an error code of FLOW_CONTROL_ERROR; for the connection, a GOAWAY frame with an error code of FLOW_CONTROL_ERROR is sent. - - Flow controlled frames from the sender and WINDOW_UPDATE frames from the receiver are completely asynchronous with respect to each other. This property allows a receiver to aggressively update the window size kept by the sender to prevent streams from stalling. -
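A minimal sketch of the sender-side bookkeeping described above; a real implementation also needs locking and per-stream state, and every name here is an illustrative assumption.

    package http2

    import "errors"

    // flowWindows tracks the space a sender may still use on one stream and on
    // the connection as a whole. Both windows start at 65,535 octets.
    type flowWindows struct {
        conn   int32
        stream int32
    }

    // consume records n octets of a flow-controlled frame against both windows;
    // the frame must not be sent if either window is too small.
    func (w *flowWindows) consume(n int32) error {
        if n > w.conn || n > w.stream {
            return errors.New("would exceed the peer's advertised flow-control window")
        }
        w.conn -= n
        w.stream -= n
        return nil
    }

    // release applies a WINDOW_UPDATE increment to the selected window.
    func (w *flowWindows) release(incr int32, streamLevel bool) {
        if streamLevel {
            w.stream += incr
            return
        }
        w.conn += incr
    }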
- -
- - When an HTTP/2 connection is first established, new streams are created with an initial - flow control window size of 65,535 octets. The connection flow control window is 65,535 - octets. Both endpoints can adjust the initial window size for new streams by including - a value for SETTINGS_INITIAL_WINDOW_SIZE in the SETTINGS - frame that forms part of the connection preface. The connection flow control window can - only be changed using WINDOW_UPDATE frames. - - - Prior to receiving a SETTINGS frame that sets a value for - SETTINGS_INITIAL_WINDOW_SIZE, an endpoint can only use the default - initial window size when sending flow controlled frames. Similarly, the connection flow - control window is set to the default initial window size until a WINDOW_UPDATE frame is - received. - - - A SETTINGS frame can alter the initial flow control window size for all - current streams. When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, - a receiver MUST adjust the size of all stream flow control windows that it maintains by - the difference between the new value and the old value. - - - A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in - a flow control window to become negative. A sender MUST track the negative flow control - window, and MUST NOT send new flow controlled frames until it receives WINDOW_UPDATE - frames that cause the flow control window to become positive. - - - For example, if the client sends 60KB immediately on connection establishment, and the - server sets the initial window size to be 16KB, the client will recalculate the - available flow control window to be -44KB on receipt of the SETTINGS - frame. The client retains a negative flow control window until WINDOW_UPDATE frames - restore the window to being positive, after which the client can resume sending. - - - A SETTINGS frame cannot alter the connection flow control window. - - - An endpoint MUST treat a change to SETTINGS_INITIAL_WINDOW_SIZE that - causes any flow control window to exceed the maximum size as a connection error of type - FLOW_CONTROL_ERROR. - -
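The recalculation in the example above is just the signed difference between the new and old SETTINGS_INITIAL_WINDOW_SIZE applied to each stream window, which can leave a window negative; a tiny sketch, with the names being assumptions:

    package http2

    // adjustStreamWindow applies a change of SETTINGS_INITIAL_WINDOW_SIZE to one
    // stream's available window. With 60 KB already sent against the 65,535-octet
    // default and a new initial size of 16 KB, the result is roughly -44 KB, and
    // sending must pause until WINDOW_UPDATE frames make the window positive.
    func adjustStreamWindow(available int32, oldInitial, newInitial uint32) int32 {
        return available + int32(newInitial) - int32(oldInitial)
    }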
- -
- - A receiver that wishes to use a smaller flow control window than the current size can - send a new SETTINGS frame. However, the receiver MUST be prepared to - receive data that exceeds this window size, since the sender might send data that - exceeds the lower limit prior to processing the SETTINGS frame. - - - After sending a SETTINGS frame that reduces the initial flow control window size, a - receiver has two options for handling streams that exceed flow control limits: - - - The receiver can immediately send RST_STREAM with - FLOW_CONTROL_ERROR error code for the affected streams. - - - The receiver can accept the streams and tolerate the resulting head of line - blocking, sending WINDOW_UPDATE frames as it consumes data. - - - -
-
- -
- - The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments. Any number of CONTINUATION frames can - be sent on an existing stream, as long as the preceding frame is on the same stream and is - a HEADERS, PUSH_PROMISE or CONTINUATION frame without the - END_HEADERS flag set. - - -
- -
- - The CONTINUATION frame payload contains a header block fragment. - - The CONTINUATION frame defines the following flag: - - END_HEADERS: Bit 3 being set indicates that this frame ends a header block. - - If the END_HEADERS bit is not set, this frame MUST be followed by another CONTINUATION frame. A receiver MUST treat the receipt of any other type of frame or a frame on a different stream as a connection error of type PROTOCOL_ERROR. - - - The CONTINUATION frame changes the connection state as defined in the section on header compression and decompression. - - CONTINUATION frames MUST be associated with a stream. If a CONTINUATION frame is received whose stream identifier field is 0x0, the recipient MUST respond with a connection error of type PROTOCOL_ERROR. - - A CONTINUATION frame MUST be preceded by a HEADERS, PUSH_PROMISE or CONTINUATION frame without the END_HEADERS flag set. A recipient that observes a violation of this rule MUST respond with a connection error of type PROTOCOL_ERROR. -
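A sketch of splitting an already encoded header block across HEADERS and CONTINUATION frames with this package's Framer; the maxFrag parameter (which must be positive) is an assumption, since a real sender derives it from the peer's SETTINGS_MAX_FRAME_SIZE.

    package http2

    // writeHeaderBlock sends block on streamID, spilling into CONTINUATION frames
    // whenever the block exceeds maxFrag octets. No other frame may be sent on the
    // connection until the fragment carrying END_HEADERS has been written.
    func writeHeaderBlock(fr *Framer, streamID uint32, block []byte, maxFrag int) error {
        first := true
        for {
            frag := block
            if len(frag) > maxFrag {
                frag = frag[:maxFrag]
            }
            block = block[len(frag):]
            end := len(block) == 0
            var err error
            if first {
                first = false
                err = fr.WriteHeaders(HeadersFrameParam{
                    StreamID:      streamID,
                    BlockFragment: frag,
                    EndHeaders:    end,
                })
            } else {
                err = fr.WriteContinuation(streamID, end, frag)
            }
            if err != nil || end {
                return err
            }
        }
    }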
-
- -
- - Error codes are 32-bit fields that are used in RST_STREAM and GOAWAY frames to convey the reasons for the stream or connection error. - - Error codes share a common code space. Some error codes apply only to either streams or the entire connection and have no defined semantics in the other context. - - The following error codes are defined: - - NO_ERROR (0x0): The associated condition is not a result of an error. For example, a GOAWAY might include this code to indicate graceful shutdown of a connection. - - PROTOCOL_ERROR (0x1): The endpoint detected an unspecific protocol error. This error is for use when a more specific error code is not available. - - INTERNAL_ERROR (0x2): The endpoint encountered an unexpected internal error. - - FLOW_CONTROL_ERROR (0x3): The endpoint detected that its peer violated the flow control protocol. - - SETTINGS_TIMEOUT (0x4): The endpoint sent a SETTINGS frame, but did not receive a response in a timely manner. See Settings Synchronization. - - STREAM_CLOSED (0x5): The endpoint received a frame after a stream was half closed. - - FRAME_SIZE_ERROR (0x6): The endpoint received a frame with an invalid size. - - REFUSED_STREAM (0x7): The endpoint refuses the stream prior to performing any application processing; see the section on request reliability for details. - - CANCEL (0x8): Used by the endpoint to indicate that the stream is no longer needed. - - COMPRESSION_ERROR (0x9): The endpoint is unable to maintain the header compression context for the connection. - - CONNECT_ERROR (0xa): The connection established in response to a CONNECT request was reset or abnormally closed. - - ENHANCE_YOUR_CALM (0xb): The endpoint detected that its peer is exhibiting a behavior that might be generating excessive load. - - INADEQUATE_SECURITY (0xc): The underlying transport has properties that do not meet minimum security requirements (see the requirements on TLS usage). - - - Unknown or unsupported error codes MUST NOT trigger any special behavior. These MAY be treated by an implementation as being equivalent to INTERNAL_ERROR. -
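In this package the codes above surface as ErrCode constants. As one sketch of how they might be consumed, the helper below decides whether a request reset with a given code is safe to retry, following the reliability rules later in this document; the helper name and the conservative default are assumptions.

    package http2

    // isSafeToRetry reports whether a request on a stream that was reset with
    // code can be retried without risking duplicate processing.
    func isSafeToRetry(code ErrCode) bool {
        switch code {
        case ErrCodeRefusedStream:
            // REFUSED_STREAM guarantees no application processing took place.
            return true
        case ErrCodeNo, ErrCodeProtocol, ErrCodeInternal, ErrCodeFlowControl,
            ErrCodeSettingsTimeout, ErrCodeStreamClosed, ErrCodeFrameSize,
            ErrCodeCancel, ErrCodeCompression, ErrCodeConnect,
            ErrCodeEnhanceYourCalm, ErrCodeInadequateSecurity:
            return false
        default:
            // Unknown codes carry no special semantics; treat them conservatively.
            return false
        }
    }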
- -
- - HTTP/2 is intended to be as compatible as possible with current uses of HTTP. This means that, from the application perspective, the features of the protocol are largely unchanged. To achieve this, all request and response semantics are preserved, although the syntax of conveying those semantics has changed. - - Thus, the specification and requirements of HTTP/1.1 Semantics and Content, Conditional Requests, Range Requests, Caching, and Authentication are applicable to HTTP/2. Selected portions of HTTP/1.1 Message Syntax and Routing, such as the HTTP and HTTPS URI schemes, are also applicable in HTTP/2, but the expression of those semantics for this protocol is defined in the sections below. -
- - A client sends an HTTP request on a new stream, using a previously unused stream identifier. A server sends an HTTP response on the same stream as the request. - - An HTTP message (request or response) consists of: - - for a response only, zero or more HEADERS frames (each followed by zero or more CONTINUATION frames) containing the message headers of informational (1xx) HTTP responses, and - - one HEADERS frame (followed by zero or more CONTINUATION frames) containing the message headers, and - - zero or more DATA frames containing the message payload, and - - optionally, one HEADERS frame, followed by zero or more CONTINUATION frames containing the trailer-part, if present. - - The last frame in the sequence bears an END_STREAM flag, noting that a HEADERS frame bearing the END_STREAM flag can be followed by CONTINUATION frames that carry any remaining portions of the header block. - - Other frames (from any stream) MUST NOT occur between either HEADERS frame and any CONTINUATION frames that might follow. - - Trailing header fields are carried in a header block that also terminates the stream. That is, a sequence starting with a HEADERS frame, followed by zero or more CONTINUATION frames, where the HEADERS frame bears an END_STREAM flag. Header blocks after the first that do not terminate the stream are not part of an HTTP request or response. - - A HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. An endpoint that receives a HEADERS frame without the END_STREAM flag set after receiving a final (non-informational) status code MUST treat the corresponding request or response as malformed. - - An HTTP request/response exchange fully consumes a single stream. A request starts with the HEADERS frame that puts the stream into an "open" state. The request ends with a frame bearing END_STREAM, which causes the stream to become "half closed (local)" for the client and "half closed (remote)" for the server. A response starts with a HEADERS frame and ends with a frame bearing END_STREAM, which places the stream in the "closed" state. -
- - HTTP/2 removes support for the 101 (Switching Protocols) informational status code. - - The semantics of 101 (Switching Protocols) aren't applicable to a multiplexed protocol. Alternative protocols are able to use the same mechanisms that HTTP/2 uses to negotiate their use. -
- -
- - HTTP header fields carry information as a series of key-value pairs. For a listing of registered HTTP headers, see the Message Header Field Registry maintained by IANA. -
- - While HTTP/1.x used the message start-line to convey the target URI and method of the request, and the status code for the response, HTTP/2 uses special pseudo-header fields beginning with the ':' character (ASCII 0x3a) for this purpose. - - Pseudo-header fields are not HTTP header fields. Endpoints MUST NOT generate pseudo-header fields other than those defined in this document. - - Pseudo-header fields are only valid in the context in which they are defined. Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST NOT appear in trailers. Endpoints MUST treat a request or response that contains undefined or invalid pseudo-header fields as malformed. - - Just as in HTTP/1.x, header field names are strings of ASCII characters that are compared in a case-insensitive fashion. However, header field names MUST be converted to lowercase prior to their encoding in HTTP/2. A request or response containing uppercase header field names MUST be treated as malformed. - - All pseudo-header fields MUST appear in the header block before regular header fields. Any request or response that contains a pseudo-header field that appears in a header block after a regular header field MUST be treated as malformed. -
- -
- - HTTP/2 does not use the Connection header field to indicate connection-specific header fields; in this protocol, connection-specific metadata is conveyed by other means. An endpoint MUST NOT generate an HTTP/2 message containing connection-specific header fields; any message containing connection-specific header fields MUST be treated as malformed. - - This means that an intermediary transforming an HTTP/1.x message to HTTP/2 will need to remove any header fields nominated by the Connection header field, along with the Connection header field itself. Such intermediaries SHOULD also remove other connection-specific header fields, such as Keep-Alive, Proxy-Connection, Transfer-Encoding and Upgrade, even if they are not nominated by Connection. - - One exception to this is the TE header field, which MAY be present in an HTTP/2 request, but when it is present it MUST NOT contain any value other than "trailers". - - - HTTP/2 purposefully does not support upgrade to another protocol. The handshake methods described in this document are believed sufficient to negotiate the use of alternative protocols. - -
- -
- - The following pseudo-header fields are defined for HTTP/2 requests: - - The :method pseudo-header field includes the HTTP method. - - The :scheme pseudo-header field includes the scheme portion of the target URI. - - :scheme is not restricted to http and https schemed URIs. A proxy or gateway can translate requests for non-HTTP schemes, enabling the use of HTTP to interact with non-HTTP services. - - The :authority pseudo-header field includes the authority portion of the target URI. The authority MUST NOT include the deprecated userinfo subcomponent for http or https schemed URIs. - - To ensure that the HTTP/1.1 request line can be reproduced accurately, this pseudo-header field MUST be omitted when translating from an HTTP/1.1 request that has a request target in origin or asterisk form. Clients that generate HTTP/2 requests directly SHOULD use the :authority pseudo-header field instead of the Host header field. An intermediary that converts an HTTP/2 request to HTTP/1.1 MUST create a Host header field if one is not present in a request by copying the value of the :authority pseudo-header field. - - The :path pseudo-header field includes the path and query parts of the target URI (the path-absolute production from RFC 3986 and optionally a '?' character followed by the query production). A request in asterisk form includes the value '*' for the :path pseudo-header field. - - This pseudo-header field MUST NOT be empty for http or https URIs; http or https URIs that do not contain a path component MUST include a value of '/'. The exception to this rule is an OPTIONS request for an http or https URI that does not include a path component; these MUST include a :path pseudo-header field with a value of '*'. - - - All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request. An HTTP request that omits mandatory pseudo-header fields is malformed. - - HTTP/2 does not define a way to carry the version identifier that is included in the HTTP/1.1 request line. -
- -
- - For HTTP/2 responses, a single :status pseudo-header field is defined that carries the HTTP status code field. This pseudo-header field MUST be included in all responses, otherwise the response is malformed. - - HTTP/2 does not define a way to carry the version or reason phrase that is included in an HTTP/1.1 status line. -
- -
- - The Cookie header field can carry a significant amount of redundant data. - - The Cookie header field uses a semi-colon (";") to delimit cookie-pairs (or "crumbs"). This header field doesn't follow the list construction rules in HTTP, which prevents cookie-pairs from being separated into different name-value pairs. This can significantly reduce compression efficiency as individual cookie-pairs are updated. - - To allow for better compression efficiency, the Cookie header field MAY be split into separate header fields, each with one or more cookie-pairs. If there are multiple Cookie header fields after decompression, these MUST be concatenated into a single octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII string "; ") before being passed into a non-HTTP/2 context, such as an HTTP/1.1 connection, or a generic HTTP server application. -
- - Therefore, the following two lists of Cookie header fields are semantically - equivalent. - - -
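The recombination rule is plain string concatenation with the "; " delimiter; a self-contained sketch using only the Go standard library (the function name is an assumption):

    package main

    import (
        "fmt"
        "strings"
    )

    // recombineCookies joins Cookie header fields that were split for better
    // HPACK compression back into the single field expected by HTTP/1.1 and by
    // generic server applications.
    func recombineCookies(crumbs []string) string {
        return strings.Join(crumbs, "; ") // the 0x3B, 0x20 delimiter
    }

    func main() {
        split := []string{"a=b", "c=d", "e=f"}
        fmt.Println(recombineCookies(split)) // prints "a=b; c=d; e=f"
    }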
-
- -
- - A malformed request or response is one that is an otherwise valid sequence of HTTP/2 frames but is invalid due to the presence of extraneous frames, prohibited header fields, the absence of mandatory header fields, or the inclusion of uppercase header field names. - - A request or response that includes an entity body can include a content-length header field. A request or response is also malformed if the value of a content-length header field does not equal the sum of the DATA frame payload lengths that form the body. A response that is defined to have no payload can have a non-zero content-length header field, even though no content is included in DATA frames. - - Intermediaries that process HTTP requests or responses (i.e., any intermediary not acting as a tunnel) MUST NOT forward a malformed request or response. Malformed requests or responses that are detected MUST be treated as a stream error of type PROTOCOL_ERROR. - - For malformed requests, a server MAY send an HTTP response prior to closing or resetting the stream. Clients MUST NOT accept a malformed response. Note that these requirements are intended to protect against several types of common attacks against HTTP; they are deliberately strict, because being permissive can expose implementations to these vulnerabilities. -
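A sketch of the content-length consistency check as it might run while DATA frames are read; the type and field names are assumptions, and padding is not counted because only the application data forms the body.

    package http2

    // bodyLengthChecker accumulates DATA payload sizes and flags a malformed
    // message when the total disagrees with a declared content-length.
    type bodyLengthChecker struct {
        declared int64 // value of the content-length header field, or -1 if absent
        received int64
    }

    // onData returns false when the message is malformed.
    func (c *bodyLengthChecker) onData(df *DataFrame) bool {
        c.received += int64(len(df.Data()))
        if c.declared < 0 {
            return true // nothing declared, nothing to check
        }
        if c.received > c.declared {
            return false // more payload than declared
        }
        if df.StreamEnded() && c.received != c.declared {
            return false // totals disagree at the end of the stream
        }
        return true
    }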
-
- -
- - This section shows HTTP/1.1 requests and responses, with illustrations of equivalent - HTTP/2 requests and responses. - - - An HTTP GET request includes request header fields and no body and is therefore - transmitted as a single HEADERS frame, followed by zero or more - CONTINUATION frames containing the serialized block of request header - fields. The HEADERS frame in the following has both the END_HEADERS and - END_STREAM flags set; no CONTINUATION frames are sent: - - -
-     HEADERS
-       + END_STREAM
-       + END_HEADERS
-         :method = GET
-         :scheme = https
-         :path = /resource
-         host = example.org
-         accept = image/jpeg
-
- - - Similarly, a response that includes only response header fields is transmitted as a - HEADERS frame (again, followed by zero or more - CONTINUATION frames) containing the serialized block of response header - fields. - - -
-     HEADERS
-       + END_STREAM
-       + END_HEADERS
-         :status = 304
-         etag = "xyzzy"
-         expires = Thu, 23 Jan ...
-
- - - An HTTP POST request that includes request header fields and payload data is transmitted - as one HEADERS frame, followed by zero or more - CONTINUATION frames containing the request header fields, followed by one - or more DATA frames, with the last CONTINUATION (or - HEADERS) frame having the END_HEADERS flag set and the final - DATA frame having the END_STREAM flag set: - - -
-     HEADERS
-       - END_STREAM
-       - END_HEADERS
-         :method = POST
-         :path = /resource
-         :scheme = https
-
-     CONTINUATION
-       + END_HEADERS
-         content-type = image/jpeg
-         host = example.org
-         content-length = 123
-
-     DATA
-       + END_STREAM
-         {binary data}
-
- Note that data contributing to any given header field could be spread between header block fragments. The allocation of header fields to frames in this example is illustrative only.
-
- - - A response that includes header fields and payload data is transmitted as a - HEADERS frame, followed by zero or more CONTINUATION - frames, followed by one or more DATA frames, with the last - DATA frame in the sequence having the END_STREAM flag set: - - -
-     HEADERS
-       - END_STREAM
-       + END_HEADERS
-         :status = 200
-         content-type = image/jpeg
-         content-length = 123
-
-     DATA
-       + END_STREAM
-         {binary data}
-
- - - Trailing header fields are sent as a header block after both the request or response - header block and all the DATA frames have been sent. The - HEADERS frame starting the trailers header block has the END_STREAM flag - set. - - -
-     HEADERS
-       - END_STREAM
-       + END_HEADERS
-         :status = 200
-         content-length = 123
-         content-type = image/jpeg
-         trailer = Foo
-
-     DATA
-       - END_STREAM
-         {binary data}
-
-     HEADERS
-       + END_STREAM
-       + END_HEADERS
-         foo = bar
-
- - -
- - An informational response using a 1xx status code other than 101 is transmitted as a HEADERS frame, followed by zero or more CONTINUATION frames:
-
-     HEADERS
-       - END_STREAM
-       + END_HEADERS
-         :status = 103
-         extension-field = bar
-
-
- -
- - In HTTP/1.1, an HTTP client is unable to retry a non-idempotent request when an error - occurs, because there is no means to determine the nature of the error. It is possible - that some server processing occurred prior to the error, which could result in - undesirable effects if the request were reattempted. - - - HTTP/2 provides two mechanisms for providing a guarantee to a client that a request has - not been processed: - - - The GOAWAY frame indicates the highest stream number that might have - been processed. Requests on streams with higher numbers are therefore guaranteed to - be safe to retry. - - - The REFUSED_STREAM error code can be included in a - RST_STREAM frame to indicate that the stream is being closed prior to - any processing having occurred. Any request that was sent on the reset stream can - be safely retried. - - - - - Requests that have not been processed have not failed; clients MAY automatically retry - them, even those with non-idempotent methods. - - - A server MUST NOT indicate that a stream has not been processed unless it can guarantee - that fact. If frames that are on a stream are passed to the application layer for any - stream, then REFUSED_STREAM MUST NOT be used for that stream, and a - GOAWAY frame MUST include a stream identifier that is greater than or - equal to the given stream identifier. - - - In addition to these mechanisms, the PING frame provides a way for a - client to easily test a connection. Connections that remain idle can become broken as - some middleboxes (for instance, network address translators, or load balancers) silently - discard connection bindings. The PING frame allows a client to safely - test whether a connection is still active without sending a request. - -
-
- -
- - HTTP/2 allows a server to pre-emptively send (or "push") responses (along with corresponding "promised" requests) to a client in association with a previous client-initiated request. This can be useful when the server knows the client will need to have those responses available in order to fully process the response to the original request. - - Pushing additional message exchanges in this fashion is optional, and is negotiated between individual endpoints. The SETTINGS_ENABLE_PUSH setting can be set to 0 to indicate that server push is disabled. - - Promised requests MUST be cacheable, MUST be safe, and MUST NOT include a request body. Clients that receive a promised request that is not cacheable, is unsafe, or includes a request body MUST reset the stream with a stream error of type PROTOCOL_ERROR. - - Pushed responses that are cacheable can be stored by the client, if it implements an HTTP cache. Pushed responses are considered successfully validated on the origin server (e.g., if the "no-cache" cache response directive is present) while the stream identified by the promised stream ID is still open. - - Pushed responses that are not cacheable MUST NOT be stored by any HTTP cache. They MAY be made available to the application separately. - - An intermediary can receive pushes from the server and choose not to forward them on to the client. In other words, how to make use of the pushed information is up to that intermediary. Equally, the intermediary might choose to make additional pushes to the client, without any action taken by the server. - - A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR. Clients MUST reject any attempt to change the SETTINGS_ENABLE_PUSH setting to a value other than 0 by treating the message as a connection error of type PROTOCOL_ERROR. -
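In Go's standard net/http integration of this protocol, a handler can initiate a push through the http.Pusher interface; a brief sketch, in which the pushed path and the certificate file names are placeholders and errors are deliberately ignored because push is only an optimization.

    package main

    import "net/http"

    func handler(w http.ResponseWriter, r *http.Request) {
        // Push the stylesheet the page is about to reference, if the connection
        // is HTTP/2 and the client has not disabled server push.
        if pusher, ok := w.(http.Pusher); ok {
            _ = pusher.Push("/static/style.css", nil)
        }
        w.Write([]byte(`<html><link rel="stylesheet" href="/static/style.css"></html>`))
    }

    func main() {
        http.HandleFunc("/", handler)
        // Server push requires HTTP/2, which ListenAndServeTLS enables by default.
        http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
    }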
- - Server push is semantically equivalent to a server responding to a request; however, in - this case that request is also sent by the server, as a PUSH_PROMISE - frame. - - - The PUSH_PROMISE frame includes a header block that contains a complete - set of request header fields that the server attributes to the request. It is not - possible to push a response to a request that includes a request body. - - - - Pushed responses are always associated with an explicit request from the client. The - PUSH_PROMISE frames sent by the server are sent on that explicit - request's stream. The PUSH_PROMISE frame also includes a promised stream - identifier, chosen from the stream identifiers available to the server (see ). - - - - The header fields in PUSH_PROMISE and any subsequent - CONTINUATION frames MUST be a valid and complete set of request header fields. The server MUST include a method in - the :method header field that is safe and cacheable. If a - client receives a PUSH_PROMISE that does not include a complete and valid - set of header fields, or the :method header field identifies - a method that is not safe, it MUST respond with a stream error of type PROTOCOL_ERROR. - - - - The server SHOULD send PUSH_PROMISE () - frames prior to sending any frames that reference the promised responses. This avoids a - race where clients issue requests prior to receiving any PUSH_PROMISE - frames. - - - For example, if the server receives a request for a document containing embedded links - to multiple image files, and the server chooses to push those additional images to the - client, sending push promises before the DATA frames that contain the - image links ensures that the client is able to see the promises before discovering - embedded links. Similarly, if the server pushes responses referenced by the header block - (for instance, in Link header fields), sending the push promises before sending the - header block ensures that clients do not request them. - - - - PUSH_PROMISE frames MUST NOT be sent by the client. - - - PUSH_PROMISE frames can be sent by the server in response to any - client-initiated stream, but the stream MUST be in either the "open" or "half closed - (remote)" state with respect to the server. PUSH_PROMISE frames are - interspersed with the frames that comprise a response, though they cannot be - interspersed with HEADERS and CONTINUATION frames that - comprise a single header block. - - - Sending a PUSH_PROMISE frame creates a new stream and puts the stream - into the “reserved (local)” state for the server and the “reserved (remote)” state for - the client. - -
- -
- - After sending the PUSH_PROMISE frame, the server can begin delivering the - pushed response as a response on a server-initiated - stream that uses the promised stream identifier. The server uses this stream to - transmit an HTTP response, using the same sequence of frames as defined in . This stream becomes "half closed" - to the client after the initial HEADERS frame is sent. - - - - Once a client receives a PUSH_PROMISE frame and chooses to accept the - pushed response, the client SHOULD NOT issue any requests for the promised response - until after the promised stream has closed. - - - - If the client determines, for any reason, that it does not wish to receive the pushed - response from the server, or if the server takes too long to begin sending the promised - response, the client can send an RST_STREAM frame, using either the - CANCEL or REFUSED_STREAM codes, and referencing the pushed - stream's identifier. - - - A client can use the SETTINGS_MAX_CONCURRENT_STREAMS setting to limit the - number of responses that can be concurrently pushed by a server. Advertising a - SETTINGS_MAX_CONCURRENT_STREAMS value of zero disables server push by - preventing the server from creating the necessary streams. This does not prohibit a - server from sending PUSH_PROMISE frames; clients need to reset any - promised streams that are not wanted. - - - - Clients receiving a pushed response MUST validate that either the server is - authoritative (see ), or the proxy that provided the pushed - response is configured for the corresponding request. For example, a server that offers - a certificate for only the example.com DNS-ID or Common Name - is not permitted to push a response for https://www.example.org/doc. - - - The response for a PUSH_PROMISE stream begins with a - HEADERS frame, which immediately puts the stream into the “half closed - (remote)” state for the server and “half closed (local)” state for the client, and ends - with a frame bearing END_STREAM, which places the stream in the "closed" state. - - - The client never sends a frame with the END_STREAM flag for a server push. - - - -
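On the server side, Go's standard net/http exposes this mechanism through the http.Pusher interface, which HTTP/2-capable ResponseWriters implement. A minimal sketch follows; the URL paths and certificate file names are illustrative.

package main

import (
	"io"
	"log"
	"net/http"
)

func pageHandler(w http.ResponseWriter, r *http.Request) {
	// Push is best effort: it fails if the client disabled push
	// (SETTINGS_ENABLE_PUSH = 0) or the connection is not HTTP/2.
	if pusher, ok := w.(http.Pusher); ok {
		if err := pusher.Push("/static/style.css", nil); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	io.WriteString(w, `<html><head><link rel="stylesheet" href="/static/style.css"></head><body>hello</body></html>`)
}

func main() {
	http.HandleFunc("/", pageHandler)
	// HTTP/2 is negotiated automatically over TLS via ALPN.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}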
- -
- -
- - In HTTP/1.x, the pseudo-method CONNECT () is used to convert an HTTP connection into a tunnel to a remote host. - CONNECT is primarily used with HTTP proxies to establish a TLS session with an origin - server for the purposes of interacting with https resources. - - - In HTTP/2, the CONNECT method is used to establish a tunnel over a single HTTP/2 stream to - a remote host, for similar purposes. The HTTP header field mapping works as defined in - Request Header Fields, with a few - differences. Specifically: - - - The :method header field is set to CONNECT. - - - The :scheme and :path header - fields MUST be omitted. - - - The :authority header field contains the host and port to - connect to (equivalent to the authority-form of the request-target of CONNECT - requests, see ). - - - - - A proxy that supports CONNECT establishes a TCP connection to - the server identified in the :authority header field. Once - this connection is successfully established, the proxy sends a HEADERS - frame containing a 2xx series status code to the client, as defined in . - - - After the initial HEADERS frame sent by each peer, all subsequent - DATA frames correspond to data sent on the TCP connection. The payload of - any DATA frames sent by the client is transmitted by the proxy to the TCP - server; data received from the TCP server is assembled into DATA frames by - the proxy. Frame types other than DATA or stream management frames - (RST_STREAM, WINDOW_UPDATE, and PRIORITY) - MUST NOT be sent on a connected stream, and MUST be treated as a stream error if received. - - - The TCP connection can be closed by either peer. The END_STREAM flag on a - DATA frame is treated as being equivalent to the TCP FIN bit. A client is - expected to send a DATA frame with the END_STREAM flag set after receiving - a frame bearing the END_STREAM flag. A proxy that receives a DATA frame - with the END_STREAM flag set sends the attached data with the FIN bit set on the last TCP - segment. A proxy that receives a TCP segment with the FIN bit set sends a - DATA frame with the END_STREAM flag set. Note that the final TCP segment - or DATA frame could be empty. - - - A TCP connection error is signaled with RST_STREAM. A proxy treats any - error in the TCP connection, which includes receiving a TCP segment with the RST bit set, - as a stream error of type - CONNECT_ERROR. Correspondingly, a proxy MUST send a TCP segment with the - RST bit set if it detects an error with the stream or the HTTP/2 connection. - -
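Once the tunnel is established, DATA frames map directly onto TCP bytes in each direction, with END_STREAM standing in for FIN. A proxy's relay loop might look like the following sketch, where the stream is exposed as an io.ReadWriteCloser (an assumption made for illustration, not an API of this package).

import (
	"io"
	"net"
)

// relay shuttles bytes between the CONNECT stream and the TCP connection to
// the origin server, propagating half-closes in both directions.
func relay(stream io.ReadWriteCloser, origin *net.TCPConn) {
	done := make(chan struct{}, 2)
	go func() {
		io.Copy(origin, stream) // client DATA frames -> TCP segments
		origin.CloseWrite()     // END_STREAM from client -> FIN toward origin
		done <- struct{}{}
	}()
	go func() {
		io.Copy(stream, origin) // TCP segments -> DATA frames
		stream.Close()          // FIN from origin -> END_STREAM toward client
		done <- struct{}{}
	}()
	<-done
	<-done
}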
-
- -
- - This section outlines attributes of the HTTP protocol that improve interoperability, reduce - exposure to known security vulnerabilities, or reduce the potential for implementation - variation. - - -
- - HTTP/2 connections are persistent. For best performance, it is expected clients will not - close connections until it is determined that no further communication with a server is - necessary (for example, when a user navigates away from a particular web page), or until - the server closes the connection. - - - Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair, - where host is derived from a URI, a selected alternative - service, or a configured proxy. - - - A client can create additional connections as replacements, either to replace connections - that are near to exhausting the available stream - identifier space, to refresh the keying material for a TLS connection, or to - replace connections that have encountered errors. - - - A client MAY open multiple connections to the same IP address and TCP port using different - Server Name Indication values or to provide different TLS - client certificates, but SHOULD avoid creating multiple connections with the same - configuration. - - - Servers are encouraged to maintain open connections for as long as possible, but are - permitted to terminate idle connections if necessary. When either endpoint chooses to - close the transport-layer TCP connection, the terminating endpoint SHOULD first send a - GOAWAY () frame so that both endpoints can reliably - determine whether previously sent frames have been processed and gracefully complete or - terminate any necessary remaining tasks. - - -
- - Connections that are made to an origin server, either directly or through a tunnel created using the CONNECT method, MAY be reused for requests with multiple different URI authority components. A connection can be reused as long as the origin server is authoritative. For http resources, this depends on the host having resolved to the same IP address. - For https resources, connection reuse additionally depends on having a certificate that is valid for the host in the URI. An origin server might offer a certificate with multiple subjectAltName attributes, or names with wildcards, one of which is valid for the authority in the URI. For example, a certificate with a subjectAltName of *.example.com might permit the use of the same connection for requests to URIs starting with https://a.example.com/ and https://b.example.com/. - In some deployments, reusing a connection for multiple origins can result in requests being directed to the wrong origin server. For example, TLS termination might be performed by a middlebox that uses the TLS Server Name Indication (SNI) extension to select an origin server. This means that it is possible for clients to send confidential information to servers that might not be the intended target for the request, even though the server is otherwise authoritative. - A server that does not wish clients to reuse connections can indicate that it is not authoritative for a request by sending a 421 (Misdirected Request) status code in response to the request. - A client that is configured to use a proxy over HTTP/2 directs requests to that proxy through a single connection. That is, all requests sent via a proxy reuse the connection to the proxy. -
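A client deciding whether to coalesce requests for a second authority onto an existing https connection can check the already-verified certificate against the new host, for example with crypto/x509's VerifyHostname. This is a sketch of the idea, not this package's internal coalescing logic.

import (
	"crypto/tls"
	"crypto/x509"
)

// canCoalesce reports whether the verified leaf certificate of an existing
// connection also covers newHost, so the connection may be reused for it.
func canCoalesce(cert *x509.Certificate, newHost string) bool {
	// VerifyHostname understands subjectAltName entries, including
	// wildcards such as *.example.com.
	return cert.VerifyHostname(newHost) == nil
}

// reusableFor applies the check to the TLS state of an existing connection.
func reusableFor(state tls.ConnectionState, newHost string) bool {
	return len(state.PeerCertificates) > 0 && canCoalesce(state.PeerCertificates[0], newHost)
}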
- -
- - The 421 (Misdirected Request) status code indicates that the request was directed at a - server that is not able to produce a response. This can be sent by a server that is not - configured to produce responses for the combination of scheme and authority that are - included in the request URI. - - - Clients receiving a 421 (Misdirected Request) response from a server MAY retry the - request - whether the request method is idempotent or not - over a different connection. - This is possible if a connection is reused () or if an alternative - service is selected (). - - - This status code MUST NOT be generated by proxies. - - - A 421 response is cacheable by default; i.e., unless otherwise indicated by the method - definition or explicit cache controls (see ). - -
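A client built on net/http could react to a 421 by dropping the coalesced connection and retrying, roughly as sketched below. This assumes the request has no body (or a replayable one); the function name is illustrative.

import "net/http"

// retryOnMisdirected retries a request once on a fresh connection if the
// server answered 421 (Misdirected Request).
func retryOnMisdirected(c *http.Client, t *http.Transport, req *http.Request) (*http.Response, error) {
	resp, err := c.Do(req)
	if err != nil || resp.StatusCode != http.StatusMisdirectedRequest {
		return resp, err
	}
	resp.Body.Close()
	// Drop idle (possibly coalesced) connections so the retry dials anew.
	t.CloseIdleConnections()
	return c.Do(req)
}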
-
- -
- - Implementations of HTTP/2 MUST support TLS 1.2 for HTTP/2 over - TLS. The general TLS usage guidance in SHOULD be followed, with - some additional restrictions that are specific to HTTP/2. - - - - An implementation of HTTP/2 over TLS MUST use TLS 1.2 or higher with the restrictions on - feature set and cipher suite described in this section. Due to implementation - limitations, it might not be possible to fail TLS negotiation. An endpoint MUST - immediately terminate an HTTP/2 connection that does not meet these minimum requirements - with a connection error of type - INADEQUATE_SECURITY. - - -
- - The TLS implementation MUST support the Server Name Indication - (SNI) extension to TLS. HTTP/2 clients MUST indicate the target domain name when - negotiating TLS. - - - The TLS implementation MUST disable compression. TLS compression can lead to the - exposure of information that would not otherwise be revealed . - Generic compression is unnecessary since HTTP/2 provides compression features that are - more aware of context and therefore likely to be more appropriate for use for - performance, security or other reasons. - - - The TLS implementation MUST disable renegotiation. An endpoint MUST treat a TLS - renegotiation as a connection error of type - PROTOCOL_ERROR. Note that disabling renegotiation can result in - long-lived connections becoming unusable due to limits on the number of messages the - underlying cipher suite can encipher. - - - A client MAY use renegotiation to provide confidentiality protection for client - credentials offered in the handshake, but any renegotiation MUST occur prior to sending - the connection preface. A server SHOULD request a client certificate if it sees a - renegotiation request immediately after establishing a connection. - - - This effectively prevents the use of renegotiation in response to a request for a - specific protected resource. A future specification might provide a way to support this - use case. - -
- -
- - The set of TLS cipher suites that are permitted in HTTP/2 is restricted. HTTP/2 MUST only be used with cipher suites that have ephemeral key exchange, such as ephemeral Diffie-Hellman (DHE) or the elliptic curve variant (ECDHE). Ephemeral key exchange MUST have a minimum size of 2048 bits for DHE or a security level of 128 bits for ECDHE. Clients MUST accept DHE sizes of up to 4096 bits. HTTP/2 MUST NOT be used with cipher suites that use stream or block ciphers. Authenticated Encryption with Additional Data (AEAD) modes, such as the Galois Counter Mode (GCM) for AES, are acceptable. - The effect of these restrictions is that TLS 1.2 implementations could have non-intersecting sets of available cipher suites, since these restrictions prevent the use of the cipher suite that TLS 1.2 makes mandatory. To avoid this problem, implementations of HTTP/2 that use TLS 1.2 MUST support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 with the P-256 elliptic curve. - Clients MAY advertise support of cipher suites that are prohibited by the above restrictions in order to allow for connection to servers that do not support HTTP/2. This enables a fallback to protocols without these constraints without the additional latency imposed by using a separate connection for fallback. -
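In Go, a crypto/tls configuration consistent with these requirements might look like the sketch below. The specific suite list is an example; crypto/tls ignores CipherSuites for TLS 1.3, and it disables renegotiation and TLS-level compression by default.

import "crypto/tls"

func http2TLSConfig() *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS12,
		// Include the mandatory-to-implement ECDHE/AEAD suite for HTTP/2
		// over TLS 1.2, plus its ECDSA counterpart.
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		},
		CurvePreferences: []tls.CurveID{tls.CurveP256},
		NextProtos:       []string{"h2", "http/1.1"},
	}
}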
-
-
- -
-
- - HTTP/2 relies on the HTTP/1.1 definition of authority for determining whether a server is - authoritative in providing a given response, see . This relies on local name resolution for the "http" - URI scheme, and the authenticated server identity for the "https" scheme (see ). - -
- -
- - In a cross-protocol attack, an attacker causes a client to initiate a transaction in one protocol toward a server that understands a different protocol. An attacker might be able to cause the transaction to appear to be a valid transaction in the second protocol. In combination with the capabilities of the web context, this can be used to interact with poorly protected servers in private networks. - Completing a TLS handshake with an ALPN identifier for HTTP/2 can be considered sufficient protection against cross-protocol attacks. ALPN provides a positive indication that a server is willing to proceed with HTTP/2, which prevents attacks on other TLS-based protocols. - The encryption in TLS makes it difficult for attackers to control the data that could be used in a cross-protocol attack on a cleartext protocol. - The cleartext version of HTTP/2 has minimal protection against cross-protocol attacks. The connection preface contains a string that is designed to confuse HTTP/1.1 servers, but no special protection is offered for other protocols. A server that is willing to ignore parts of an HTTP/1.1 request containing an Upgrade header field in addition to the client connection preface could be exposed to a cross-protocol attack. -
- -
- - HTTP/2 header field names and values are encoded as sequences of octets with a length - prefix. This enables HTTP/2 to carry any string of octets as the name or value of a - header field. An intermediary that translates HTTP/2 requests or responses into HTTP/1.1 - directly could permit the creation of corrupted HTTP/1.1 messages. An attacker might - exploit this behavior to cause the intermediary to create HTTP/1.1 messages with illegal - header fields, extra header fields, or even new messages that are entirely falsified. - - - Header field names or values that contain characters not permitted by HTTP/1.1, including - carriage return (ASCII 0xd) or line feed (ASCII 0xa) MUST NOT be translated verbatim by an - intermediary, as stipulated in . - - - Translation from HTTP/1.x to HTTP/2 does not produce the same opportunity to an attacker. - Intermediaries that perform translation to HTTP/2 MUST remove any instances of the obs-fold production from header field values. - -
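An intermediary translating HTTP/2 header fields into HTTP/1.1 can apply a check along these lines before emitting them. This is a simplified sketch; production code would enforce the full HTTP/1.1 field grammar.

// safeForHTTP1 reports whether an HTTP/2 header field value can be written
// into an HTTP/1.1 message verbatim, i.e. it contains no NUL, CR, or LF
// octets that could split or forge header lines.
func safeForHTTP1(value string) bool {
	for i := 0; i < len(value); i++ {
		switch value[i] {
		case 0x00, '\r', '\n':
			return false
		}
	}
	return true
}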
- -
- - Pushed responses do not have an explicit request from the client; the request - is provided by the server in the PUSH_PROMISE frame. - - - Caching responses that are pushed is possible based on the guidance provided by the origin - server in the Cache-Control header field. However, this can cause issues if a single - server hosts more than one tenant. For example, a server might offer multiple users each - a small portion of its URI space. - - - Where multiple tenants share space on the same server, that server MUST ensure that - tenants are not able to push representations of resources that they do not have authority - over. Failure to enforce this would allow a tenant to provide a representation that would - be served out of cache, overriding the actual representation that the authoritative tenant - provides. - - - Pushed responses for which an origin server is not authoritative (see - ) are never cached or used. - -
- -
- - An HTTP/2 connection can demand a greater commitment of resources to operate than an HTTP/1.1 connection. The use of header compression and flow control depends on a commitment of resources for storing a greater amount of state. Settings for these features ensure that memory commitments for them are strictly bounded. - The number of PUSH_PROMISE frames is not constrained in the same fashion. A client that accepts server push SHOULD limit the number of streams it allows to be in the "reserved (remote)" state. An excessive number of server push streams can be treated as a stream error of type ENHANCE_YOUR_CALM. - Processing capacity cannot be guarded as effectively as state capacity. - The SETTINGS frame can be abused to cause a peer to expend additional processing time. This might be done by pointlessly changing SETTINGS parameters, setting multiple undefined parameters, or changing the same setting multiple times in the same frame. WINDOW_UPDATE or PRIORITY frames can be abused to cause an unnecessary waste of resources. - Large numbers of small or empty frames can be abused to cause a peer to expend time processing frame headers. Note, however, that some uses are entirely legitimate, such as the sending of an empty DATA frame to end a stream. - Header compression also offers some opportunities to waste processing resources; the header compression specification describes potential abuses in more detail. - Limits in SETTINGS parameters cannot be reduced instantaneously, which leaves an endpoint exposed to behavior from a peer that could exceed the new limits. In particular, immediately after establishing a connection, limits set by a server are not known to clients and could be exceeded without being an obvious protocol violation. - All these features - i.e., SETTINGS changes, small frames, header compression - have legitimate uses. These features become a burden only when they are used unnecessarily or to excess. - An endpoint that does not monitor this behavior exposes itself to a risk of denial-of-service attack. Implementations SHOULD track the use of these features and set limits on their use. An endpoint MAY treat activity that is suspicious as a connection error of type ENHANCE_YOUR_CALM. - -
- - A large header block can cause an implementation to commit a large amount of state. Header fields that are critical for routing can appear toward the end of a header block, which prevents streaming of header fields to their ultimate destination. For this and other reasons, such as ensuring cache correctness, an endpoint might need to buffer the entire header block. Since there is no hard limit to the size of a header block, some endpoints could be forced to commit a large amount of available memory for header fields. - An endpoint can use the SETTINGS_MAX_HEADER_LIST_SIZE setting to advise peers of limits that might apply on the size of header blocks. This setting is only advisory, so endpoints MAY choose to send header blocks that exceed this limit and risk having the request or response treated as malformed. This setting is specific to a connection, so any request or response could encounter a hop with a lower, unknown limit. An intermediary can attempt to avoid this problem by passing on values presented by different peers, but it is not obligated to do so. - A server that receives a larger header block than it is willing to handle can send an HTTP 431 (Request Header Fields Too Large) status code. A client can discard responses that it cannot process. The header block MUST be processed to ensure a consistent connection state, unless the connection is closed. -
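In this package, the advertised limit corresponds to the Transport's MaxHeaderListSize field, which is sent to the peer as SETTINGS_MAX_HEADER_LIST_SIZE (see the initialSettings construction in transport.go above). A short example, using the upstream golang.org/x/net/http2 import path for illustration:

import "golang.org/x/net/http2"

func newConstrainedTransport() *http2.Transport {
	return &http2.Transport{
		// Advertise a 1 MiB SETTINGS_MAX_HEADER_LIST_SIZE. The setting is
		// advisory: a peer MAY still send a larger header block, which the
		// transport then treats as an error rather than truncating.
		MaxHeaderListSize: 1 << 20,
	}
}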
-
- -
- - HTTP/2 enables greater use of compression for both header fields and entity bodies. Compression can allow an attacker to recover secret data when it is compressed in the same context as data under attacker control. - There are demonstrable attacks on compression that exploit the characteristics of the web (e.g., BREACH). The attacker induces multiple requests containing varying plaintext, observing the length of the resulting ciphertext in each, which reveals a shorter length when a guess about the secret is correct. - Implementations communicating on a secure channel MUST NOT compress content that includes both confidential and attacker-controlled data unless separate compression dictionaries are used for each source of data. Compression MUST NOT be used if the source of data cannot be reliably determined. Generic stream compression, such as that provided by TLS, MUST NOT be used with HTTP/2. - Further considerations regarding the compression of header fields are described in the header compression specification. -
- -
- - Padding within HTTP/2 is not intended as a replacement for general purpose padding, such - as might be provided by TLS. Redundant padding could even be - counterproductive. Correct application can depend on having specific knowledge of the - data that is being padded. - - - To mitigate attacks that rely on compression, disabling or limiting compression might be - preferable to padding as a countermeasure. - - - Padding can be used to obscure the exact size of frame content, and is provided to - mitigate specific attacks within HTTP. For example, attacks where compressed content - includes both attacker-controlled plaintext and secret data (see for example, ). - - - Use of padding can result in less protection than might seem immediately obvious. At - best, padding only makes it more difficult for an attacker to infer length information by - increasing the number of frames an attacker has to observe. Incorrectly implemented - padding schemes can be easily defeated. In particular, randomized padding with a - predictable distribution provides very little protection; similarly, padding payloads to a - fixed size exposes information as payload sizes cross the fixed size boundary, which could - be possible if an attacker can control plaintext. - - - Intermediaries SHOULD retain padding for DATA frames, but MAY drop padding - for HEADERS and PUSH_PROMISE frames. A valid reason for an - intermediary to change the amount of padding of frames is to improve the protections that - padding provides. - -
- -
- - Several characteristics of HTTP/2 provide an observer an opportunity to correlate actions - of a single client or server over time. This includes the value of settings, the manner - in which flow control windows are managed, the way priorities are allocated to streams, - timing of reactions to stimulus, and handling of any optional features. - - - As far as this creates observable differences in behavior, they could be used as a basis - for fingerprinting a specific client, as defined in . - -
-
- -
- - A string for identifying HTTP/2 is entered into the "Application Layer Protocol Negotiation - (ALPN) Protocol IDs" registry established in . - - - This document establishes a registry for frame types, settings, and error codes. These new - registries are entered into a new "Hypertext Transfer Protocol (HTTP) 2 Parameters" section. - - - This document registers the HTTP2-Settings header field for - use in HTTP; and the 421 (Misdirected Request) status code. - - - This document registers the PRI method for use in HTTP, to avoid - collisions with the connection preface. - - -
- - This document creates two registrations for the identification of HTTP/2 in the - "Application Layer Protocol Negotiation (ALPN) Protocol IDs" registry established in . - - - The "h2" string identifies HTTP/2 when used over TLS: - - HTTP/2 over TLS - 0x68 0x32 ("h2") - This document - - - - The "h2c" string identifies HTTP/2 when used over cleartext TCP: - - HTTP/2 over TCP - 0x68 0x32 0x63 ("h2c") - This document - - -
- -
- - This document establishes a registry for HTTP/2 frame type codes. The "HTTP/2 Frame Type" registry manages an 8-bit space. The "HTTP/2 Frame Type" registry operates under either of the "IETF Review" or "IESG Approval" policies for values between 0x00 and 0xef, with values between 0xf0 and 0xff being reserved for experimental use. - New entries in this registry require the following information: - - A name or label for the frame type. - - The 8-bit code assigned to the frame type. - - A reference to a specification that includes a description of the frame layout, its semantics, and the flags that the frame type uses, including any parts of the frame that are conditionally present based on the value of flags. - - The entries registered by this document are: DATA (0x0), HEADERS (0x1), PRIORITY (0x2), RST_STREAM (0x3), SETTINGS (0x4), PUSH_PROMISE (0x5), PING (0x6), GOAWAY (0x7), WINDOW_UPDATE (0x8), CONTINUATION (0x9). -
- -
- - This document establishes a registry for HTTP/2 settings. The "HTTP/2 Settings" registry manages a 16-bit space. The "HTTP/2 Settings" registry operates under the "Expert Review" policy for values in the range from 0x0000 to 0xefff, with values between 0xf000 and 0xffff being reserved for experimental use. - New registrations are advised to provide the following information: - - A symbolic name for the setting. Specifying a setting name is optional. - - The 16-bit code assigned to the setting. - - An initial value for the setting. - - An optional reference to a specification that describes the use of the setting. - - The initial set of setting registrations made by this document is: HEADER_TABLE_SIZE (0x1, initial value 4096), ENABLE_PUSH (0x2, initial value 1), MAX_CONCURRENT_STREAMS (0x3, initial value infinite), INITIAL_WINDOW_SIZE (0x4, initial value 65535), MAX_FRAME_SIZE (0x5, initial value 16384), MAX_HEADER_LIST_SIZE (0x6, initial value infinite). - -
- -
- - This document establishes a registry for HTTP/2 error codes. The "HTTP/2 Error Code" registry manages a 32-bit space. The "HTTP/2 Error Code" registry operates under the "Expert Review" policy. - Registrations for error codes are required to include a description of the error code. An expert reviewer is advised to examine new registrations for possible duplication with existing error codes. Use of existing registrations is to be encouraged, but not mandated. - New registrations are advised to provide the following information: - - A name for the error code. Specifying an error code name is optional. - - The 32-bit error code value. - - A brief description of the error code semantics, longer if no detailed specification is provided. - - An optional reference for a specification that defines the error code. - - The entries registered by this document are: NO_ERROR (0x0, graceful shutdown); PROTOCOL_ERROR (0x1, protocol error detected); INTERNAL_ERROR (0x2, implementation fault); FLOW_CONTROL_ERROR (0x3, flow control limits exceeded); SETTINGS_TIMEOUT (0x4, settings not acknowledged); STREAM_CLOSED (0x5, frame received for closed stream); FRAME_SIZE_ERROR (0x6, frame size incorrect); REFUSED_STREAM (0x7, stream not processed); CANCEL (0x8, stream cancelled); COMPRESSION_ERROR (0x9, compression state not updated); CONNECT_ERROR (0xa, TCP connection error for CONNECT method); ENHANCE_YOUR_CALM (0xb, processing capacity exceeded); INADEQUATE_SECURITY (0xc, negotiated TLS parameters not acceptable). -
- -
- - This section registers the HTTP2-Settings header field in the Permanent Message Header Field Registry. - Header field name: HTTP2-Settings. Applicable protocol: http. Status: standard. Author/Change controller: IETF. Specification document(s): this document. Related information: this header field is only used by an HTTP/2 client for Upgrade-based negotiation. -
- -
- - This section registers the PRI method in the HTTP Method Registry. - Method name: PRI. Safe: No. Idempotent: No. Specification document(s): this document. - This method is never used by an actual client. This method will appear to be used when an HTTP/1.1 server or intermediary attempts to parse an HTTP/2 connection preface. -
- -
- - This document registers the 421 (Misdirected Request) HTTP status code in the Hypertext Transfer Protocol (HTTP) Status Code Registry. - Status code: 421. Short description: Misdirected Request. Specification: this document. -
- -
- -
- - This document includes substantial input from the following individuals: - - - Adam Langley, Wan-Teh Chang, Jim Morrison, Mark Nottingham, Alyssa Wilk, Costin - Manolache, William Chan, Vitaliy Lvin, Joe Chan, Adam Barth, Ryan Hamilton, Gavin - Peters, Kent Alstad, Kevin Lindsay, Paul Amer, Fan Yang, Jonathan Leighton (SPDY - contributors). - - - Gabriel Montenegro and Willy Tarreau (Upgrade mechanism). - - - William Chan, Salvatore Loreto, Osama Mazahir, Gabriel Montenegro, Jitu Padhye, Roberto - Peon, Rob Trace (Flow control). - - - Mike Bishop (Extensibility). - - - Mark Nottingham, Julian Reschke, James Snell, Jeff Pinner, Mike Bishop, Herve Ruellan - (Substantial editorial contributions). - - - Kari Hurtta, Tatsuhiro Tsujikawa, Greg Wilkins, Poul-Henning Kamp. - - - Alexey Melnikov was an editor of this document during 2013. - - - A substantial proportion of Martin's contribution was supported by Microsoft during his - employment there. - - - -
-
- References (titles only):
- HPACK - Header Compression for HTTP/2
- Transmission Control Protocol
- Key words for use in RFCs to Indicate Requirement Levels
- HTTP Over TLS
- Uniform Resource Identifier (URI): Generic Syntax
- The Base16, Base32, and Base64 Data Encodings
- Guidelines for Writing an IANA Considerations Section in RFCs
- Augmented BNF for Syntax Specifications: ABNF
- The Transport Layer Security (TLS) Protocol Version 1.2
- Transport Layer Security (TLS) Extensions: Extension Definitions
- Transport Layer Security (TLS) Application-Layer Protocol Negotiation Extension
- TLS Elliptic Curve Cipher Suites with SHA-256/384 and AES Galois Counter Mode (GCM)
- Digital Signature Standard (DSS)
- Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing
- Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content
- Hypertext Transfer Protocol (HTTP/1.1): Conditional Requests
- Hypertext Transfer Protocol (HTTP/1.1): Range Requests
- Hypertext Transfer Protocol (HTTP/1.1): Caching
- Hypertext Transfer Protocol (HTTP/1.1): Authentication
- HTTP State Management Mechanism
- TCP Extensions for High Performance
- Transport Layer Security Protocol Compression Methods
- Additional HTTP Status Codes
- Elliptic Curve Cryptography (ECC) Cipher Suites for Transport Layer Security (TLS)
- AES Galois Counter Mode (GCM) Cipher Suites for TLS
- HTML5
- Talking to Yourself for Fun and Profit
- BREACH: Reviving the CRIME Attack
- Registration Procedures for Message Header Fields
- Recommendations for Secure Use of TLS and DTLS
- HTTP Alternative Services
- -
- - This section is to be removed by RFC Editor before publication. - - -
- - Renamed Not Authoritative status code to Misdirected Request. - -
- -
- - Pseudo-header fields are now required to appear strictly before regular ones. - Restored 1xx series status codes, except 101. - Changed the frame length field to 24 bits. Expanded the frame header to 9 octets. Added a setting to limit the damage. - Added a setting to advise peers of header set size limits. - Removed segments. - Made non-semantic-bearing HEADERS frames illegal in the HTTP mapping. -
- -
- - Restored extensibility options. - - - Restricting TLS cipher suites to AEAD only. - - - Removing Content-Encoding requirements. - - - Permitting the use of PRIORITY after stream close. - - - Removed ALTSVC frame. - - - Removed BLOCKED frame. - - - Reducing the maximum padding size to 256 octets; removing padding from - CONTINUATION frames. - - - Removed per-frame GZIP compression. - -
- -
- - Added BLOCKED frame (at risk). - - - Simplified priority scheme. - - - Added DATA per-frame GZIP compression. - -
- -
- - Changed "connection header" to "connection preface" to avoid confusion. - - - Added dependency-based stream prioritization. - - - Added "h2c" identifier to distinguish between cleartext and secured HTTP/2. - - - Adding missing padding to PUSH_PROMISE. - - - Integrate ALTSVC frame and supporting text. - - - Dropping requirement on "deflate" Content-Encoding. - - - Improving security considerations around use of compression. - -
- -
- - Adding padding for data frames. - - - Renumbering frame types, error codes, and settings. - - - Adding INADEQUATE_SECURITY error code. - - - Updating TLS usage requirements to 1.2; forbidding TLS compression. - - - Removing extensibility for frames and settings. - - - Changing setting identifier size. - - - Removing the ability to disable flow control. - - - Changing the protocol identification token to "h2". - - - Changing the use of :authority to make it optional and to allow userinfo in non-HTTP - cases. - - - Allowing split on 0x0 for Cookie. - - - Reserved PRI method in HTTP/1.1 to avoid possible future collisions. - -
- -
- - Added cookie crumbling for more efficient header compression. - - - Added header field ordering with the value-concatenation mechanism. - -
- -
- - Marked draft for implementation. - -
- -
- - Adding definition for CONNECT method. - - - Constraining the use of push to safe, cacheable methods with no request body. - - - Changing from :host to :authority to remove any potential confusion. - - - Adding setting for header compression table size. - - - Adding settings acknowledgement. - - - Removing unnecessary and potentially problematic flags from CONTINUATION. - - - Added denial of service considerations. - -
-
- - Marking the draft ready for implementation. - - - Renumbering END_PUSH_PROMISE flag. - - - Editorial clarifications and changes. - -
- -
- - Added CONTINUATION frame for HEADERS and PUSH_PROMISE. - - - PUSH_PROMISE is no longer implicitly prohibited if SETTINGS_MAX_CONCURRENT_STREAMS is - zero. - - - Push expanded to allow all safe methods without a request body. - - - Clarified the use of HTTP header fields in requests and responses. Prohibited HTTP/1.1 - hop-by-hop header fields. - - - Requiring that intermediaries not forward requests with missing or illegal routing - :-headers. - - - Clarified requirements around handling different frames after stream close, stream reset - and GOAWAY. - - - Added more specific prohibitions for sending of different frame types in various stream - states. - - - Making the last received setting value the effective value. - - - Clarified requirements on TLS version, extension and ciphers. - -
- -
- - Committed major restructuring atrocities. - - - Added reference to first header compression draft. - - - Added more formal description of frame lifecycle. - - - Moved END_STREAM (renamed from FINAL) back to HEADERS/DATA. - - - Removed HEADERS+PRIORITY, added optional priority to HEADERS frame. - - - Added PRIORITY frame. - -
- -
- - Added continuations to frames carrying header blocks. - - - Replaced use of "session" with "connection" to avoid confusion with other HTTP stateful - concepts, like cookies. - - - Removed "message". - - - Switched to TLS ALPN from NPN. - - - Editorial changes. - -
- -
- - Added IANA considerations section for frame types, error codes and settings. - - - Removed data frame compression. - - - Added PUSH_PROMISE. - - - Added globally applicable flags to framing. - - - Removed zlib-based header compression mechanism. - - - Updated references. - - - Clarified stream identifier reuse. - - - Removed CREDENTIALS frame and associated mechanisms. - - - Added advice against naive implementation of flow control. - - - Added session header section. - - - Restructured frame header. Removed distinction between data and control frames. - - - Altered flow control properties to include session-level limits. - - - Added note on cacheability of pushed resources and multiple tenant servers. - - - Changed protocol label form based on discussions. - -
- -
- - Changed title throughout. - - - Removed section on Incompatibilities with SPDY draft#2. - - - Changed INTERNAL_ERROR on GOAWAY to have a value of 2 . - - - Replaced abstract and introduction. - - - Added section on starting HTTP/2.0, including upgrade mechanism. - - - Removed unused references. - - - Added flow control principles based on . - -
- -
- - Adopted as base for draft-ietf-httpbis-http2. - - - Updated authors/editors list. - - - Added status note. - -
-
- -
-
- diff --git a/pkg/http2/timer.go b/pkg/http2/timer.go new file mode 100644 index 0000000..0b1c17b --- /dev/null +++ b/pkg/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/pkg/http2/transport.go b/pkg/http2/transport.go index df578b8..090d0e1 100644 --- a/pkg/http2/transport.go +++ b/pkg/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -147,6 +146,12 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // IdleConnTimeout is the maximum amount of time an idle + // (keep-alive) connection will remain idle before closing + // itself. + // Zero means no limit. + IdleConnTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using ping // frame will be carried out if no frame is received on the connection. // Note that a ping response will is considered a received frame, so if @@ -178,41 +183,81 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool + + *transportTestHooks } -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() } - if t.MaxHeaderListSize == 0xffffffff { - return 0 +} + +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() } - return t.MaxHeaderListSize + return time.Now() } -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize + return time.Since(when) +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. 
+func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) } - return t.MaxReadFrameSize + return timeTimer{time.AfterFunc(d, f)} } -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second +func (t *Transport) maxHeaderListSize() uint32 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { + return 10 << 20 } - return t.PingTimeout + if n >= 0xffffffff { + return 0 + } + return uint32(n) +} +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. @@ -250,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -262,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -293,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -302,33 +366,56 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer *time.Timer - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + idleTimer timer + + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration + extendedConnectAllowed bool + + 
// rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -386,12 +473,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -446,12 +533,14 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -461,22 +550,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -507,6 +583,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. 
OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -537,18 +615,16 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -559,7 +635,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -573,17 +649,33 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + tm := t.newTimer(d) select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -602,9 +694,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -658,6 +751,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -717,43 +813,38 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + seenSettingsChan: make(chan struct{}), + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), + } + var group synctestGroupInterface + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) + c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -765,30 +856,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -796,11 +882,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -810,23 +894,29 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() return nil, cc.werr } + // Start the idle timer after the connection is fully initialized. + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + go cc.readLoop() return cc, nil } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
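For orientation only, not part of the patch: newClientConn now takes its tunables from configFromTransport(t), which is defined elsewhere in the change. The sketch below lists only the fields this hunk relies on; the field names come from the usages above, while the types are inferred and may differ from the real definition.

package sketch

import "time"

// transportConfig is a hypothetical mirror of the value returned by
// configFromTransport(t), restricted to the fields read in this hunk.
// The real type and its defaulting rules live elsewhere in the patch.
type transportConfig struct {
    MaxReadFrameSize             uint32        // framer read limit and SETTINGS_MAX_FRAME_SIZE
    MaxDecoderHeaderTableSize    uint32        // hpack decoder table size
    MaxEncoderHeaderTableSize    uint32        // hpack encoder dynamic table limit
    MaxUploadBufferPerConnection int32         // connection-level WINDOW_UPDATE / inflow init
    MaxUploadBufferPerStream     int32         // initialStreamRecvWindowSize
    SendPingTimeout              time.Duration // readIdleTimeout
    PingTimeout                  time.Duration // pingTimeout
    WriteByteTimeout             time.Duration // stickyErrWriter timeout
}
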
- ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -861,7 +951,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } last := f.LastStreamID for streamID, cs := range cc.streams { - if streamID > last { + if streamID <= last { + // The server's GOAWAY indicates that it received this stream. + // It will either finish processing it, or close the connection + // without doing so. Either way, leave the stream alone for now. + continue + } + if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo { + // Don't retry the first stream on a connection if we get a non-NO error. + // If the server is sending an error on a new connection, + // retrying the request on a new one probably isn't going to work. + cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode)) + } else { + // Aborting the stream with errClientConnGotGoAway indicates that + // the request should be retried on a new connection. cs.abortStreamLocked(errClientConnGotGoAway) } } @@ -938,7 +1041,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -970,16 +1073,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -992,7 +1117,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen.
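Illustrative aside, not part of the patch: the new setGoAway branches encode a three-way, per-stream decision. As a compact restatement (all names hypothetical), streams at or below LastStreamID are left for the server to finish, the first stream on a connection whose GOAWAY carries a non-NO error code is failed outright, and every other stream is aborted so it can be retried on a new connection.

package main

import "fmt"

type goAwayAction int

const (
    leaveAlone      goAwayAction = iota // server may still process the stream
    failPermanently                     // first stream on a connection that errored
    retryOnNewConn                      // abort so the request is retried elsewhere
)

// classifyStream mirrors the branch structure of setGoAway for one stream.
// lastStreamID and errCodeIsNo correspond to the GOAWAY frame's fields.
func classifyStream(streamID, lastStreamID uint32, errCodeIsNo bool) goAwayAction {
    if streamID <= lastStreamID {
        return leaveAlone
    }
    if streamID == 1 && !errCodeIsNo {
        return failPermanently
    }
    return retryOnNewConn
}

func main() {
    fmt.Println(classifyStream(3, 5, true))  // 0 = leaveAlone: the server received this stream
    fmt.Println(classifyStream(1, 0, false)) // 1 = failPermanently: first stream, GOAWAY carried an error
    fmt.Println(classifyStream(7, 5, true))  // 2 = retryOnNewConn: safe to retry on a fresh connection
}
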
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1057,6 +1182,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1215,6 +1341,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,7 +1359,28 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { select { @@ -1322,11 +1473,14 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1334,7 +1488,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. // If the returned error is StreamError, the error Code may be used in resetting the stream. -func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1342,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu.
// This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,24 +1545,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1452,9 +1609,9 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1545,6 +1702,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1569,16 +1727,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. 
+ if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1600,12 +1786,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. + return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -1875,8 +2066,28 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -1897,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -1912,19 +2123,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. 
- return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -1939,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2120,7 +2326,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2136,10 +2342,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2165,6 +2371,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2198,7 +2405,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2222,6 +2428,24 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. + // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. 
+ const unusedWaitTime = 5 * time.Second + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2265,11 +2489,10 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + readIdleTimeout := cc.readIdleTimeout + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2280,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2326,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2450,15 +2676,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2642,7 +2887,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2684,7 +2929,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2777,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2873,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -2890,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. 
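Illustrative aside, not part of the patch: pulling the extended CONNECT pieces together, a request takes that path when its method is CONNECT and it carries a ":protocol" header; the transport then holds the request until the server's first SETTINGS frame arrives and fails it with errExtendedConnectNotSupported unless SETTINGS_ENABLE_CONNECT_PROTOCOL was set to 1. The sketch below only shows what such a request could look like; buildExtendedConnect is an illustrative helper, not an API added by this change, and whether the request succeeds depends entirely on the server's settings.

package main

import (
    "fmt"
    "net/http"
    "net/url"
)

// buildExtendedConnect sketches the kind of request that takes the extended
// CONNECT path above: method CONNECT plus a ":protocol" pseudo-header, with
// :scheme, :path, and :authority derived from the URL and Host fields.
func buildExtendedConnect(host, path, protocol string) *http.Request {
    return &http.Request{
        Method: "CONNECT",
        URL:    &url.URL{Scheme: "https", Host: host, Path: path},
        Host:   host,
        Header: http.Header{":protocol": []string{protocol}},
    }
}

func main() {
    req := buildExtendedConnect("example.com", "/chat", "websocket")
    fmt.Println(req.Method, req.URL, req.Header.Get(":protocol"))
}
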
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -2898,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -2911,6 +3185,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { fl = &cs.flow } if !fl.add(int32(f.Increment)) { + // For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR + if cs != nil { + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeFlowControl, + }) + return nil + } + return ConnectionError(ErrCodeFlowControl) } cc.cond.Broadcast() @@ -2918,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -2955,24 +3238,26 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) + var pingError error + errc := make(chan struct{}) go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } }() select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -2991,6 +3276,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3013,13 +3304,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). 
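Illustrative aside, not part of the patch: the bookkeeping around cancelled requests is spread across cleanupWriteRequest, processPing, and streamByID, so a small standalone model of the pendingResets / rstStreamPingsBlocked interaction may help. All names are hypothetical except the two fields they mirror.

package main

import "fmt"

// pendingResetTracker sketches the accounting added around cancelled requests:
// each RST_STREAM paired with a PING keeps a concurrency slot busy
// (pendingResets) until the PING is acknowledged, and the ACK suppresses
// further RST_STREAM pings until a HEADERS or DATA frame arrives
// (rstStreamPingsBlocked).
type pendingResetTracker struct {
    pendingResets         int
    rstStreamPingsBlocked bool
}

// cancelStream reports whether this cancellation should carry a PING,
// mirroring the branch in cleanupWriteRequest.
func (t *pendingResetTracker) cancelStream() (sendPing bool) {
    if t.rstStreamPingsBlocked {
        return false
    }
    sendPing = t.pendingResets == 0
    t.pendingResets++
    return sendPing
}

// onPingAck mirrors processPing: the server answered, so the reserved slots
// are released and further pings are blocked until new frames arrive.
func (t *pendingResetTracker) onPingAck() {
    if t.pendingResets > 0 {
        t.pendingResets = 0
        t.rstStreamPingsBlocked = true
    }
}

// onHeadersOrData mirrors streamByID's side effect for HEADERS/DATA frames.
func (t *pendingResetTracker) onHeadersOrData() {
    t.rstStreamPingsBlocked = false
}

func main() {
    var t pendingResetTracker
    fmt.Println(t.cancelStream()) // true: first cancellation sends a PING
    fmt.Println(t.cancelStream()) // false: a PING is already in flight
    t.onPingAck()
    fmt.Println(t.cancelStream()) // false: pings blocked until HEADERS/DATA
    t.onHeadersOrData()
    fmt.Println(t.cancelStream()) // true again
}
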
cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3141,9 +3439,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } @@ -3165,7 +3471,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/pkg/http2/transport_test.go b/pkg/http2/transport_test.go index a81131f..0e12e0f 100644 --- a/pkg/http2/transport_test.go +++ b/pkg/http2/transport_test.go @@ -16,7 +16,6 @@ import ( "fmt" "io" "io/fs" - "io/ioutil" "log" "math/rand" "net" @@ -95,6 +94,88 @@ func startH2cServer(t *testing.T) net.Listener { return l } +func TestIdleConnTimeout(t *testing.T) { + for _, test := range []struct { + name string + idleConnTimeout time.Duration + wait time.Duration + baseTransport *http.Transport + wantNewConn bool + }{{ + name: "NoExpiry", + idleConnTimeout: 2 * time.Second, + wait: 1 * time.Second, + baseTransport: nil, + wantNewConn: false, + }, { + name: "H2TransportTimeoutExpires", + idleConnTimeout: 1 * time.Second, + wait: 2 * time.Second, + baseTransport: nil, + wantNewConn: true, + }, { + name: "H1TransportTimeoutExpires", + idleConnTimeout: 0 * time.Second, + wait: 1 * time.Second, + baseTransport: &http.Transport{ + IdleConnTimeout: 2 * time.Second, + }, + wantNewConn: false, + }} { + t.Run(test.name, func(t *testing.T) { + tt := newTestTransport(t, func(tr *Transport) { + tr.IdleConnTimeout = test.idleConnTimeout + }) + var tc *testClientConn + for i := 0; i < 3; i++ { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tt.roundTrip(req) + + // This request happens on a new conn if it's the first request + // (and there is no cached conn), or if the test timeout is long + // enough that old conns are being closed. + wantConn := i == 0 || test.wantNewConn + if has := tt.hasConn(); has != wantConn { + t.Fatalf("request %v: hasConn=%v, want %v", i, has, wantConn) + } + if wantConn { + tc = tt.getConn() + // Read client's SETTINGS and first WINDOW_UPDATE, + // send our SETTINGS. + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.writeSettings() + } + if tt.hasConn() { + t.Fatalf("request %v: Transport has more than one conn", i) + } + + // Respond to the client's request. + hf := readFrame[*HeadersFrame](t, tc) + tc.writeHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt.wantStatus(200) + + // If this was a newly-accepted conn, read the SETTINGS ACK. 
+ if wantConn { + tc.wantFrameType(FrameSettings) // ACK to our settings + } + + tt.advance(test.wait) + if got, want := tc.isClosed(), test.wantNewConn; got != want { + t.Fatalf("after waiting %v, conn closed=%v; want %v", test.wait, got, want) + } + } + }) + } +} + func TestTransportH2c(t *testing.T) { l := startH2cServer(t) defer l.Close() @@ -124,7 +205,7 @@ func TestTransportH2c(t *testing.T) { if res.ProtoMajor != 2 { t.Fatal("proto not h2c") } - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) if err != nil { t.Fatal(err) } @@ -138,15 +219,14 @@ func TestTransportH2c(t *testing.T) { func TestTransport(t *testing.T) { const body = "sup" - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, body) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - u, err := url.Parse(st.ts.URL) + u, err := url.Parse(ts.URL) if err != nil { t.Fatal(err) } @@ -182,7 +262,7 @@ func TestTransport(t *testing.T) { if res.TLS == nil { t.Errorf("%d: Response.TLS = nil; want non-nil", i) } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err != nil { t.Errorf("%d: Body read: %v", i, err) } else if string(slurp) != body { @@ -193,26 +273,27 @@ func TestTransport(t *testing.T) { } func testTransportReusesConns(t *testing.T, useClient, wantSame bool, modReq func(*http.Request)) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, r.RemoteAddr) - }, optOnlyServer, func(c net.Conn, st http.ConnState) { - t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + }, func(ts *httptest.Server) { + ts.Config.ConnState = func(c net.Conn, st http.ConnState) { + t.Logf("conn %v is now state %v", c.RemoteAddr(), st) + } }) - defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure} if useClient { tr.ConnPool = noDialClientConnPool{new(clientConnPool)} } defer tr.CloseIdleConnections() get := func() string { - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } modReq(req) var res *http.Response if useClient { - c := st.ts.Client() + c := ts.Client() ConfigureTransports(c.Transport.(*http.Transport)) res, err = c.Do(req) } else { @@ -222,7 +303,7 @@ func testTransportReusesConns(t *testing.T, useClient, wantSame bool, modReq fun t.Fatal(err) } defer res.Body.Close() - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err != nil { t.Fatalf("Body read: %v", err) } @@ -276,15 +357,12 @@ func TestTransportGetGotConnHooks_HTTP2Transport(t *testing.T) { func TestTransportGetGotConnHooks_Client(t *testing.T) { testTransportGetGotConnHooks(t, true) } func testTransportGetGotConnHooks(t *testing.T, useClient bool) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, r.RemoteAddr) - }, func(s *httptest.Server) { - s.EnableHTTP2 = true - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} - client := st.ts.Client() + client := ts.Client() ConfigureTransports(client.Transport.(*http.Transport)) var ( @@ -307,7 +385,7 @@ func testTransportGetGotConnHooks(t *testing.T, useClient bool) { } }, } - req, err := 
http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } @@ -350,9 +428,8 @@ func (c *testNetConn) Close() error { // Tests that the Transport only keeps one pending dial open per destination address. // https://golang.org/issue/13397 func TestTransportGroupsPendingDials(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - }, optOnlyServer) - defer st.Close() + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + }) var ( mu sync.Mutex dialCount int @@ -381,7 +458,7 @@ func TestTransportGroupsPendingDials(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Error(err) return @@ -404,35 +481,21 @@ func TestTransportGroupsPendingDials(t *testing.T) { } } -func retry(tries int, delay time.Duration, fn func() error) error { - var err error - for i := 0; i < tries; i++ { - err = fn() - if err == nil { - return nil - } - time.Sleep(delay) - } - return err -} - func TestTransportAbortClosesPipes(t *testing.T) { shutdown := make(chan struct{}) - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.(http.Flusher).Flush() <-shutdown }, - optOnlyServer, ) - defer st.Close() defer close(shutdown) // we must shutdown before st.Close() to avoid hanging errCh := make(chan error) go func() { defer close(errCh) tr := &Transport{TLSClientConfig: tlsConfigInsecure} - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { errCh <- err return @@ -443,8 +506,8 @@ func TestTransportAbortClosesPipes(t *testing.T) { return } defer res.Body.Close() - st.closeConn() - _, err = ioutil.ReadAll(res.Body) + ts.CloseClientConnections() + _, err = io.ReadAll(res.Body) if err == nil { errCh <- errors.New("expected error from res.Body.Read") return @@ -466,13 +529,11 @@ func TestTransportAbortClosesPipes(t *testing.T) { // could be a table-driven test with extra goodies. func TestTransportPath(t *testing.T) { gotc := make(chan *url.URL, 1) - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { gotc <- r.URL }, - optOnlyServer, ) - defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -480,7 +541,7 @@ func TestTransportPath(t *testing.T) { path = "/testpath" query = "q=1" ) - surl := st.ts.URL + path + "?" + query + surl := ts.URL + path + "?" 
+ query req, err := http.NewRequest("POST", surl, nil) if err != nil { t.Fatal(err) @@ -574,18 +635,16 @@ func TestTransportBody(t *testing.T) { err error } gotc := make(chan reqInfo, 1) - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { - slurp, err := ioutil.ReadAll(r.Body) + slurp, err := io.ReadAll(r.Body) if err != nil { gotc <- reqInfo{err: err} } else { gotc <- reqInfo{req: r, slurp: slurp} } }, - optOnlyServer, ) - defer st.Close() for i, tt := range bodyTests { tr := &Transport{TLSClientConfig: tlsConfigInsecure} @@ -595,7 +654,7 @@ func TestTransportBody(t *testing.T) { if tt.noContentLen { body = struct{ io.Reader }{body} // just a Reader, hiding concrete type and other methods } - req, err := http.NewRequest("POST", st.ts.URL, body) + req, err := http.NewRequest("POST", ts.URL, body) if err != nil { t.Fatalf("#%d: %v", i, err) } @@ -635,15 +694,13 @@ func TestTransportDialTLS(t *testing.T) { var mu sync.Mutex // guards following var gotReq, didDial bool - ts := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { mu.Lock() gotReq = true mu.Unlock() }, - optOnlyServer, ) - defer ts.Close() tr := &Transport{ DialTLS: func(netw, addr string, cfg *tls.Config) (net.Conn, error) { mu.Lock() @@ -659,7 +716,7 @@ func TestTransportDialTLS(t *testing.T) { } defer tr.CloseIdleConnections() client := &http.Client{Transport: tr} - res, err := client.Get(ts.ts.URL) + res, err := client.Get(ts.URL) if err != nil { t.Fatal(err) } @@ -694,18 +751,17 @@ func TestConfigureTransport(t *testing.T) { } // And does it work? - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, r.Proto) - }, optOnlyServer) - defer st.Close() + }) t1.TLSClientConfig.InsecureSkipVerify = true c := &http.Client{Transport: t1} - res, err := c.Get(st.ts.URL) + res, err := c.Get(ts.URL) if err != nil { t.Fatal(err) } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err != nil { t.Fatal(err) } @@ -740,53 +796,6 @@ func (fw flushWriter) Write(p []byte) (n int, err error) { return } -type clientTester struct { - t *testing.T - tr *Transport - sc, cc net.Conn // server and client conn - fr *Framer // server's framer - settings *SettingsFrame - client func() error - server func() error -} - -func newClientTester(t *testing.T) *clientTester { - var dialOnce struct { - sync.Mutex - dialed bool - } - ct := &clientTester{ - t: t, - } - ct.tr = &Transport{ - TLSClientConfig: tlsConfigInsecure, - DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { - dialOnce.Lock() - defer dialOnce.Unlock() - if dialOnce.dialed { - return nil, errors.New("only one dial allowed in test mode") - } - dialOnce.dialed = true - return ct.cc, nil - }, - } - - ln := newLocalListener(t) - cc, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - sc, err := ln.Accept() - if err != nil { - t.Fatal(err) - } - ln.Close() - ct.cc = cc - ct.sc = sc - ct.fr = NewFramer(sc, sc) - return ct -} - func newLocalListener(t *testing.T) net.Listener { ln, err := net.Listen("tcp4", "127.0.0.1:0") if err == nil { @@ -799,302 +808,88 @@ func newLocalListener(t *testing.T) net.Listener { return ln } -func (ct *clientTester) greet(settings ...Setting) { - buf := make([]byte, len(ClientPreface)) - _, err := io.ReadFull(ct.sc, buf) - if err != nil { - ct.t.Fatalf("reading client preface: %v", err) - } - f, err 
:= ct.fr.ReadFrame() - if err != nil { - ct.t.Fatalf("Reading client settings frame: %v", err) - } - var ok bool - if ct.settings, ok = f.(*SettingsFrame); !ok { - ct.t.Fatalf("Wanted client settings frame; got %v", f) - } - if err := ct.fr.WriteSettings(settings...); err != nil { - ct.t.Fatal(err) - } - if err := ct.fr.WriteSettingsAck(); err != nil { - ct.t.Fatal(err) - } -} - -func (ct *clientTester) readNonSettingsFrame() (Frame, error) { - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return nil, err - } - if _, ok := f.(*SettingsFrame); ok { - continue - } - return f, nil - } -} - -// writeReadPing sends a PING and immediately reads the PING ACK. -// It will fail if any other unread data was pending on the connection, -// aside from SETTINGS frames. -func (ct *clientTester) writeReadPing() error { - data := [8]byte{1, 2, 3, 4, 5, 6, 7, 8} - if err := ct.fr.WritePing(false, data); err != nil { - return fmt.Errorf("Error writing PING: %v", err) - } - f, err := ct.readNonSettingsFrame() - if err != nil { - return err - } - p, ok := f.(*PingFrame) - if !ok { - return fmt.Errorf("got a %v, want a PING ACK", f) - } - if p.Flags&FlagPingAck == 0 { - return fmt.Errorf("got a PING, want a PING ACK") - } - if p.Data != data { - return fmt.Errorf("got PING data = %x, want %x", p.Data, data) - } - return nil -} - -func (ct *clientTester) inflowWindow(streamID uint32) int32 { - pool := ct.tr.connPoolOrDef.(*clientConnPool) - pool.mu.Lock() - defer pool.mu.Unlock() - if n := len(pool.keys); n != 1 { - ct.t.Errorf("clientConnPool contains %v keys, expected 1", n) - return -1 - } - for cc := range pool.keys { - cc.mu.Lock() - defer cc.mu.Unlock() - if streamID == 0 { - return cc.inflow.avail + cc.inflow.unsent - } - cs := cc.streams[streamID] - if cs == nil { - ct.t.Errorf("no stream with id %v", streamID) - return -1 - } - return cs.inflow.avail + cs.inflow.unsent - } - return -1 -} - -func (ct *clientTester) cleanup() { - ct.tr.CloseIdleConnections() - - // close both connections, ignore the error if its already closed - ct.sc.Close() - ct.cc.Close() -} - -func (ct *clientTester) run() { - var errOnce sync.Once - var wg sync.WaitGroup - - run := func(which string, fn func() error) { - defer wg.Done() - if err := fn(); err != nil { - errOnce.Do(func() { - ct.t.Errorf("%s: %v", which, err) - ct.cleanup() - }) - } - } - - wg.Add(2) - go run("client", ct.client) - go run("server", ct.server) - wg.Wait() +func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } +func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } - errOnce.Do(ct.cleanup) // clean up if no error -} +func testTransportReqBodyAfterResponse(t *testing.T, status int) { + const bodySize = 1 << 10 + + tc := newTestClientConn(t) + tc.greet() + + body := tc.newRequestBody() + body.writeBytes(bodySize / 2) + req, _ := http.NewRequest("PUT", "https://dummy.tld/", body) + rt := tc.roundTrip(req) + + tc.wantHeaders(wantHeader{ + streamID: rt.streamID(), + endStream: false, + header: http.Header{ + ":authority": []string{"dummy.tld"}, + ":method": []string{"PUT"}, + ":path": []string{"/"}, + }, + }) -func (ct *clientTester) readFrame() (Frame, error) { - return ct.fr.ReadFrame() -} + // Provide enough congestion window for the full request body. 
+ tc.writeWindowUpdate(0, bodySize) + tc.writeWindowUpdate(rt.streamID(), bodySize) -func (ct *clientTester) firstHeaders() (*HeadersFrame, error) { - for { - f, err := ct.readFrame() - if err != nil { - return nil, fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - continue - } - hf, ok := f.(*HeadersFrame) - if !ok { - return nil, fmt.Errorf("Got %T; want HeadersFrame", f) - } - return hf, nil - } -} + tc.wantData(wantData{ + streamID: rt.streamID(), + endStream: false, + size: bodySize / 2, + }) -type countingReader struct { - n *int64 -} + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", strconv.Itoa(status), + ), + }) -func (r countingReader) Read(p []byte) (n int, err error) { - for i := range p { - p[i] = byte(i) + res := rt.response() + if res.StatusCode != status { + t.Fatalf("status code = %v; want %v", res.StatusCode, status) } - atomic.AddInt64(r.n, int64(len(p))) - return len(p), err -} -func TestTransportReqBodyAfterResponse_200(t *testing.T) { testTransportReqBodyAfterResponse(t, 200) } -func TestTransportReqBodyAfterResponse_403(t *testing.T) { testTransportReqBodyAfterResponse(t, 403) } + body.writeBytes(bodySize / 2) + body.closeWithError(io.EOF) -func testTransportReqBodyAfterResponse(t *testing.T, status int) { - const bodySize = 10 << 20 - clientDone := make(chan struct{}) - ct := newClientTester(t) - recvLen := make(chan int64, 1) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - defer ct.cc.(*net.TCPConn).Close() - } - defer close(clientDone) - - body := &pipe{b: new(bytes.Buffer)} - io.Copy(body, io.LimitReader(neverEnding('A'), bodySize/2)) - req, err := http.NewRequest("PUT", "https://dummy.tld/", body) - if err != nil { - return err - } - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - if res.StatusCode != status { - return fmt.Errorf("status code = %v; want %v", res.StatusCode, status) - } - io.Copy(body, io.LimitReader(neverEnding('A'), bodySize/2)) - body.CloseWithError(io.EOF) - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("Slurp: %v", err) - } - if len(slurp) > 0 { - return fmt.Errorf("unexpected body: %q", slurp) - } - res.Body.Close() - if status == 200 { - if got := <-recvLen; got != bodySize { - return fmt.Errorf("For 200 response, Transport wrote %d bytes; want %d", got, bodySize) - } - } else { - if got := <-recvLen; got == 0 || got >= bodySize { - return fmt.Errorf("For %d response, Transport wrote %d bytes; want (0,%d) exclusive", status, got, bodySize) - } - } - return nil + if status == 200 { + // After a 200 response, client sends the remaining request body. + tc.wantData(wantData{ + streamID: rt.streamID(), + endStream: true, + size: bodySize / 2, + multiple: true, + }) + } else { + // After a 403 response, client gives up and resets the stream. + tc.wantFrameType(FrameRSTStream) } - ct.server = func() error { - ct.greet() - defer close(recvLen) - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - var dataRecv int64 - var closed bool - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it - // will have reported any - // errors on its side. 
- return nil - default: - return err - } - } - //println(fmt.Sprintf("server got frame: %v", f)) - ended := false - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - if f.StreamEnded() { - return fmt.Errorf("headers contains END_STREAM unexpectedly: %v", f) - } - case *DataFrame: - dataLen := len(f.Data()) - if dataLen > 0 { - if dataRecv == 0 { - enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - } - if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { - return err - } - if err := ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { - return err - } - } - dataRecv += int64(dataLen) - - if !closed && ((status != 200 && dataRecv > 0) || - (status == 200 && f.StreamEnded())) { - closed = true - if err := ct.fr.WriteData(f.StreamID, true, nil); err != nil { - return err - } - } - if f.StreamEnded() { - ended = true - } - case *RSTStreamFrame: - if status == 200 { - return fmt.Errorf("Unexpected client frame %v", f) - } - ended = true - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - if ended { - select { - case recvLen <- dataRecv: - default: - } - } - } - } - ct.run() + rt.wantBody(nil) } // See golang.org/issue/13444 func TestTransportFullDuplex(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) // redundant but for clarity w.(http.Flusher).Flush() io.Copy(flushWriter{w}, capitalizeReader{r.Body}) fmt.Fprintf(w, "bye.\n") - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} pr, pw := io.Pipe() - req, err := http.NewRequest("PUT", st.ts.URL, ioutil.NopCloser(pr)) + req, err := http.NewRequest("PUT", ts.URL, io.NopCloser(pr)) if err != nil { t.Fatal(err) } @@ -1132,12 +927,11 @@ func TestTransportFullDuplex(t *testing.T) { func TestTransportConnectRequest(t *testing.T) { gotc := make(chan *http.Request, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { gotc <- r - }, optOnlyServer) - defer st.Close() + }) - u, err := url.Parse(st.ts.URL) + u, err := url.Parse(ts.URL) if err != nil { t.Fatal(err) } @@ -1257,121 +1051,74 @@ func testTransportResPattern(t *testing.T, expect100Continue, resHeader headerTy panic("invalid combination") } - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) - if expect100Continue != noHeader { - req.Header.Set("Expect", "100-continue") - } - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("status code = %v; want 200", res.StatusCode) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("Slurp: %v", err) - } - wantBody := resBody - if !withData { - wantBody = "" - } - if string(slurp) != wantBody { - return fmt.Errorf("body = %q; want %q", slurp, wantBody) - } - if trailers == noHeader { - if len(res.Trailer) > 0 { - t.Errorf("Trailer = %v; want 
none", res.Trailer) - } - } else { - want := http.Header{"Some-Trailer": {"some-value"}} - if !reflect.DeepEqual(res.Trailer, want) { - t.Errorf("Trailer = %v; want %v", res.Trailer, want) - } - } - return nil + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("POST", "https://dummy.tld/", strings.NewReader(reqBody)) + if expect100Continue != noHeader { + req.Header.Set("Expect", "100-continue") } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) + rt := tc.roundTrip(req) - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - endStream := false - send := func(mode headerType) { - hbf := buf.Bytes() - switch mode { - case oneHeader: - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.Header().StreamID, - EndHeaders: true, - EndStream: endStream, - BlockFragment: hbf, - }) - case splitHeader: - if len(hbf) < 2 { - panic("too small") - } - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.Header().StreamID, - EndHeaders: false, - EndStream: endStream, - BlockFragment: hbf[:1], - }) - ct.fr.WriteContinuation(f.Header().StreamID, true, hbf[1:]) - default: - panic("bogus mode") - } - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *DataFrame: - if !f.StreamEnded() { - // No need to send flow control tokens. The test request body is tiny. - continue - } - // Response headers (1+ frames; 1 or 2 in this test, but never 0) - { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "x-foo", Value: "blah"}) - enc.WriteField(hpack.HeaderField{Name: "x-bar", Value: "more"}) - if trailers != noHeader { - enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "some-trailer"}) - } - endStream = withData == false && trailers == noHeader - send(resHeader) - } - if withData { - endStream = trailers == noHeader - ct.fr.WriteData(f.StreamID, endStream, []byte(resBody)) - } - if trailers != noHeader { - endStream = true - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "some-value"}) - send(trailers) - } - if endStream { - return nil - } - case *HeadersFrame: - if expect100Continue != noHeader { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"}) - send(expect100Continue) - } - } - } + tc.wantFrameType(FrameHeaders) + + // Possibly 100-continue, or skip when noHeader. + tc.writeHeadersMode(expect100Continue, HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "100", + ), + }) + + // Client sends request body. 
+ tc.wantData(wantData{ + streamID: rt.streamID(), + endStream: true, + size: len(reqBody), + }) + + hdr := []string{ + ":status", "200", + "x-foo", "blah", + "x-bar", "more", + } + if trailers != noHeader { + hdr = append(hdr, "trailer", "some-trailer") + } + tc.writeHeadersMode(resHeader, HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: withData == false && trailers == noHeader, + BlockFragment: tc.makeHeaderBlockFragment(hdr...), + }) + if withData { + endStream := trailers == noHeader + tc.writeData(rt.streamID(), endStream, []byte(resBody)) + } + tc.writeHeadersMode(trailers, HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + "some-trailer", "some-value", + ), + }) + + rt.wantStatus(200) + if !withData { + rt.wantBody(nil) + } else { + rt.wantBody([]byte(resBody)) + } + if trailers == noHeader { + rt.wantTrailers(nil) + } else { + rt.wantTrailers(http.Header{ + "Some-Trailer": {"some-value"}, + }) } - ct.run() } // Issue 26189, Issue 17739: ignore unknown 1xx responses @@ -1383,130 +1130,76 @@ func TestTransportUnknown1xx(t *testing.T) { return nil } - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 204 { - return fmt.Errorf("status code = %v; want 204", res.StatusCode) - } - want := `code=110 header=map[Foo-Bar:[110]] + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + for i := 110; i <= 114; i++ { + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", fmt.Sprint(i), + "foo-bar", fmt.Sprint(i), + ), + }) + } + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "204", + ), + }) + + res := rt.response() + if res.StatusCode != 204 { + t.Fatalf("status code = %v; want 204", res.StatusCode) + } + want := `code=110 header=map[Foo-Bar:[110]] code=111 header=map[Foo-Bar:[111]] code=112 header=map[Foo-Bar:[112]] code=113 header=map[Foo-Bar:[113]] code=114 header=map[Foo-Bar:[114]] ` - if got := buf.String(); got != want { - t.Errorf("Got trace:\n%s\nWant:\n%s", got, want) - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - for i := 110; i <= 114; i++ { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: fmt.Sprint(i)}) - enc.WriteField(hpack.HeaderField{Name: "foo-bar", Value: fmt.Sprint(i)}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - } - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - return nil - } - } + if got := buf.String(); got != want { + t.Errorf("Got trace:\n%s\nWant:\n%s", got, want) } - ct.run() - } func TestTransportReceiveUndeclaredTrailer(t *testing.T) 
{ - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("status code = %v; want 200", res.StatusCode) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("res.Body ReadAll error = %q, %v; want %v", slurp, err, nil) - } - if len(slurp) > 0 { - return fmt.Errorf("body = %q; want nothing", slurp) - } - if _, ok := res.Trailer["Some-Trailer"]; !ok { - return fmt.Errorf("expected Some-Trailer") - } - return nil - } - ct.server = func() error { - ct.greet() - - var n int - var hf *HeadersFrame - for hf == nil && n < 10 { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - hf, _ = f.(*HeadersFrame) - n++ - } - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - - // send headers without Trailer header - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + "some-trailer", "I'm an undeclared Trailer!", + ), + }) - // send trailers - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: "some-trailer", Value: "I'm an undeclared Trailer!"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - return nil - } - ct.run() + rt.wantStatus(200) + rt.wantBody(nil) + rt.wantTrailers(http.Header{ + "Some-Trailer": []string{"I'm an undeclared Trailer!"}, + }) } func TestTransportInvalidTrailer_Pseudo1(t *testing.T) { @@ -1516,10 +1209,10 @@ func TestTransportInvalidTrailer_Pseudo2(t *testing.T) { testTransportInvalidTrailer_Pseudo(t, splitHeader) } func testTransportInvalidTrailer_Pseudo(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: ":colon", Value: "foo"}) - enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) - }) + testInvalidTrailer(t, trailers, pseudoHeaderError(":colon"), + ":colon", "foo", + "foo", "bar", + ) } func TestTransportInvalidTrailer_Capital1(t *testing.T) { @@ -1529,102 +1222,54 @@ func TestTransportInvalidTrailer_Capital2(t *testing.T) { testTransportInvalidTrailer_Capital(t, splitHeader) } func testTransportInvalidTrailer_Capital(t *testing.T, trailers headerType) { - testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) - enc.WriteField(hpack.HeaderField{Name: "Capital", Value: "bad"}) - }) + testInvalidTrailer(t, trailers, headerFieldNameError("Capital"), + "foo", "bar", + "Capital", "bad", + ) } func TestTransportInvalidTrailer_EmptyFieldName(t *testing.T) { - testInvalidTrailer(t, oneHeader, headerFieldNameError(""), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: "", 
Value: "bad"}) - }) + testInvalidTrailer(t, oneHeader, headerFieldNameError(""), + "", "bad", + ) } func TestTransportInvalidTrailer_BinaryFieldValue(t *testing.T) { - testInvalidTrailer(t, oneHeader, headerFieldValueError("x"), func(enc *hpack.Encoder) { - enc.WriteField(hpack.HeaderField{Name: "x", Value: "has\nnewline"}) - }) + testInvalidTrailer(t, oneHeader, headerFieldValueError("x"), + "x", "has\nnewline", + ) } -func testInvalidTrailer(t *testing.T, trailers headerType, wantErr error, writeTrailer func(*hpack.Encoder)) { - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("status code = %v; want 200", res.StatusCode) - } - slurp, err := ioutil.ReadAll(res.Body) - se, ok := err.(StreamError) - if !ok || se.Cause != wantErr { - return fmt.Errorf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", slurp, err, wantErr, wantErr) - } - if len(slurp) > 0 { - return fmt.Errorf("body = %q; want nothing", slurp) - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) +func testInvalidTrailer(t *testing.T, mode headerType, wantErr error, trailers ...string) { + tc := newTestClientConn(t) + tc.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - var endStream bool - send := func(mode headerType) { - hbf := buf.Bytes() - switch mode { - case oneHeader: - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: endStream, - BlockFragment: hbf, - }) - case splitHeader: - if len(hbf) < 2 { - panic("too small") - } - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: false, - EndStream: endStream, - BlockFragment: hbf[:1], - }) - ct.fr.WriteContinuation(f.StreamID, true, hbf[1:]) - default: - panic("bogus mode") - } - } - // Response headers (1+ frames; 1 or 2 in this test, but never 0) - { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "trailer", Value: "declared"}) - endStream = false - send(oneHeader) - } - // Trailers: - { - endStream = true - buf.Reset() - writeTrailer(enc) - send(trailers) - } - return nil - } - } + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + "trailer", "declared", + ), + }) + tc.writeHeadersMode(mode, HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment(trailers...), + }) + + rt.wantStatus(200) + body, err := rt.readBody() + se, ok := err.(StreamError) + if !ok || se.Cause != wantErr { + t.Fatalf("res.Body ReadAll error = %q, %#v; want StreamError with cause %T, %#v", body, err, wantErr, wantErr) + } + if len(body) > 0 { + t.Fatalf("body = %q; want nothing", body) } - ct.run() } // headerListSize returns the HTTP2 header list size of h. 
@@ -1741,24 +1386,22 @@ func TestPadHeaders(t *testing.T) { } func TestTransportChecksRequestHeaderListSize(t *testing.T) { - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { // Consume body & force client to send // trailers before writing response. - // ioutil.ReadAll returns non-nil err for + // io.ReadAll returns non-nil err for // requests that attempt to send greater than // maxHeaderListSize bytes of trailers, since // those requests generate a stream reset. - ioutil.ReadAll(r.Body) + io.ReadAll(r.Body) r.Body.Close() }, func(ts *httptest.Server) { ts.Config.MaxHeaderBytes = 16 << 10 }, - optOnlyServer, optQuiet, ) - defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -1766,7 +1409,7 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) { checkRoundTrip := func(req *http.Request, wantErr error, desc string) { // Make an arbitrary request to ensure we get the server's // settings frame and initialize peerMaxHeaderListSize. - req0, err := http.NewRequest("GET", st.ts.URL, nil) + req0, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatalf("newRequest: NewRequest: %v", err) } @@ -1829,13 +1472,29 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) { newRequest := func() *http.Request { // Body must be non-nil to enable writing trailers. body := strings.NewReader("hello") - req, err := http.NewRequest("POST", st.ts.URL, body) + req, err := http.NewRequest("POST", ts.URL, body) if err != nil { t.Fatalf("newRequest: NewRequest: %v", err) } return req } + var ( + scMu sync.Mutex + sc *serverConn + ) + testHookGetServerConn = func(v *serverConn) { + scMu.Lock() + defer scMu.Unlock() + if sc != nil { + panic("testHookGetServerConn called multiple times") + } + sc = v + } + defer func() { + testHookGetServerConn = nil + }() + // Validate peerMaxHeaderListSize. req := newRequest() checkRoundTrip(req, nil, "Initial request") @@ -1847,16 +1506,16 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) { cc.mu.Lock() peerSize := cc.peerMaxHeaderListSize cc.mu.Unlock() - st.scMu.Lock() - wantSize := uint64(st.sc.maxHeaderListSize()) - st.scMu.Unlock() + scMu.Lock() + wantSize := uint64(sc.maxHeaderListSize()) + scMu.Unlock() if peerSize != wantSize { t.Errorf("peerMaxHeaderListSize = %v; want %v", peerSize, wantSize) } // Sanity check peerSize. (*serverConn) maxHeaderListSize adds // 320 bytes of padding. 
- wantHeaderBytes := uint64(st.ts.Config.MaxHeaderBytes) + 320 + wantHeaderBytes := uint64(ts.Config.MaxHeaderBytes) + 320 if peerSize != wantHeaderBytes { t.Errorf("peerMaxHeaderListSize = %v; want %v.", peerSize, wantHeaderBytes) } @@ -1900,115 +1559,80 @@ func TestTransportChecksRequestHeaderListSize(t *testing.T) { } func TestTransportChecksResponseHeaderListSize(t *testing.T) { - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if e, ok := err.(StreamError); ok { - err = e.Cause - } - if err != errResponseHeaderListSize { - size := int64(0) - if res != nil { - res.Body.Close() - for k, vv := range res.Header { - for _, v := range vv { - size += int64(len(k)) + int64(len(v)) + 32 - } - } - } - return fmt.Errorf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size) - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + + hdr := []string{":status", "200"} + large := strings.Repeat("a", 1<<10) + for i := 0; i < 5042; i++ { + hdr = append(hdr, large, large) + } + hbf := tc.makeHeaderBlockFragment(hdr...) + // Note: this number might change if our hpack implementation changes. + // That's fine. This is just a sanity check that our response can fit in a single + // header block fragment frame. + if size, want := len(hbf), 6329; size != want { + t.Fatalf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want) + } + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: hbf, + }) - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - large := strings.Repeat("a", 1<<10) - for i := 0; i < 5042; i++ { - enc.WriteField(hpack.HeaderField{Name: large, Value: large}) - } - if size, want := buf.Len(), 6329; size != want { - // Note: this number might change if - // our hpack implementation - // changes. That's fine. This is - // just a sanity check that our - // response can fit in a single - // header block fragment frame. 
- return fmt.Errorf("encoding over 10MB of duplicate keypairs took %d bytes; expected %d", size, want) + res, err := rt.result() + if e, ok := err.(StreamError); ok { + err = e.Cause + } + if err != errResponseHeaderListSize { + size := int64(0) + if res != nil { + res.Body.Close() + for k, vv := range res.Header { + for _, v := range vv { + size += int64(len(k)) + int64(len(v)) + 32 } - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - return nil } } + t.Fatalf("RoundTrip Error = %v (and %d bytes of response headers); want errResponseHeaderListSize", err, size) } - ct.run() } func TestTransportCookieHeaderSplit(t *testing.T) { - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - req.Header.Add("Cookie", "a=b;c=d; e=f;") - req.Header.Add("Cookie", "e=f;g=h; ") - req.Header.Add("Cookie", "i=j") - _, err := ct.tr.RoundTrip(req) - return err - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - dec := hpack.NewDecoder(initialHeaderTableSize, nil) - hfs, err := dec.DecodeFull(f.HeaderBlockFragment()) - if err != nil { - return err - } - got := []string{} - want := []string{"a=b", "c=d", "e=f", "e=f", "g=h", "i=j"} - for _, hf := range hfs { - if hf.Name == "cookie" { - got = append(got, hf.Value) - } - } - if !reflect.DeepEqual(got, want) { - t.Errorf("Cookies = %#v, want %#v", got, want) - } + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + req.Header.Add("Cookie", "a=b;c=d; e=f;") + req.Header.Add("Cookie", "e=f;g=h; ") + req.Header.Add("Cookie", "i=j") + rt := tc.roundTrip(req) + + tc.wantHeaders(wantHeader{ + streamID: rt.streamID(), + endStream: true, + header: http.Header{ + "cookie": []string{"a=b", "c=d", "e=f", "e=f", "g=h", "i=j"}, + }, + }) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "204", + ), + }) - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - return nil - } - } + if err := rt.err(); err != nil { + t.Fatalf("RoundTrip = %v, want success", err) } - ct.run() } // Test that the Transport returns a typed error from Response.Body.Read calls @@ -2016,22 +1640,20 @@ func TestTransportCookieHeaderSplit(t *testing.T) { // a stream error, but others like cancel should be similar) func TestTransportBodyReadErrorType(t *testing.T) { doPanic := make(chan bool, 1) - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.(http.Flusher).Flush() // force headers out <-doPanic panic("boom") }, - optOnlyServer, optQuiet, ) - defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} - res, err := c.Get(st.ts.URL) + res, err := c.Get(ts.URL) if err != nil { t.Fatal(err) } @@ -2055,7 +1677,7 @@ func TestTransportDoubleCloseOnWriteError(t *testing.T) { conn net.Conn // to close if set ) - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { mu.Lock() defer mu.Unlock() @@ -2063,9 +1685,7 @@ func 
TestTransportDoubleCloseOnWriteError(t *testing.T) { conn.Close() } }, - optOnlyServer, ) - defer st.Close() tr := &Transport{ TLSClientConfig: tlsConfigInsecure, @@ -2082,20 +1702,18 @@ func TestTransportDoubleCloseOnWriteError(t *testing.T) { } defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} - c.Get(st.ts.URL) + c.Get(ts.URL) } // Test that the http1 Transport.DisableKeepAlives option is respected // and connections are closed as soon as idle. // See golang.org/issue/14008 func TestTransportDisableKeepAlives(t *testing.T) { - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "hi") }, - optOnlyServer, ) - defer st.Close() connClosed := make(chan struct{}) // closed on tls.Conn.Close tr := &Transport{ @@ -2112,11 +1730,11 @@ func TestTransportDisableKeepAlives(t *testing.T) { }, } c := &http.Client{Transport: tr} - res, err := c.Get(st.ts.URL) + res, err := c.Get(ts.URL) if err != nil { t.Fatal(err) } - if _, err := ioutil.ReadAll(res.Body); err != nil { + if _, err := io.ReadAll(res.Body); err != nil { t.Fatal(err) } defer res.Body.Close() @@ -2133,14 +1751,12 @@ func TestTransportDisableKeepAlives(t *testing.T) { // but when things are totally idle, it still needs to close. func TestTransportDisableKeepAlives_Concurrency(t *testing.T) { const D = 25 * time.Millisecond - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { time.Sleep(D) io.WriteString(w, "hi") }, - optOnlyServer, ) - defer st.Close() var dials int32 var conns sync.WaitGroup @@ -2175,12 +1791,12 @@ func TestTransportDisableKeepAlives_Concurrency(t *testing.T) { } go func() { defer reqs.Done() - res, err := c.Get(st.ts.URL) + res, err := c.Get(ts.URL) if err != nil { t.Error(err) return } - if _, err := ioutil.ReadAll(res.Body); err != nil { + if _, err := io.ReadAll(res.Body); err != nil { t.Error(err) return } @@ -2224,68 +1840,62 @@ func TestTransportResponseHeaderTimeout_Body(t *testing.T) { } func testTransportResponseHeaderTimeout(t *testing.T, body bool) { - ct := newClientTester(t) - ct.tr.t1 = &http.Transport{ - ResponseHeaderTimeout: 5 * time.Millisecond, - } - ct.client = func() error { - c := &http.Client{Transport: ct.tr} - var err error - var n int64 - const bodySize = 4 << 20 - if body { - _, err = c.Post("https://dummy.tld/", "text/foo", io.LimitReader(countingReader{&n}, bodySize)) - } else { - _, err = c.Get("https://dummy.tld/") - } - if !isTimeout(err) { - t.Errorf("client expected timeout error; got %#v", err) - } - if body && n != bodySize { - t.Errorf("only read %d bytes of body; want %d", n, bodySize) + const bodySize = 4 << 20 + tc := newTestClientConn(t, func(tr *Transport) { + tr.t1 = &http.Transport{ + ResponseHeaderTimeout: 5 * time.Millisecond, } - return nil + }) + tc.greet() + + var req *http.Request + var reqBody *testRequestBody + if body { + reqBody = tc.newRequestBody() + reqBody.writeBytes(bodySize) + reqBody.closeWithError(io.EOF) + req, _ = http.NewRequest("POST", "https://dummy.tld/", reqBody) + req.Header.Set("Content-Type", "text/foo") + } else { + req, _ = http.NewRequest("GET", "https://dummy.tld/", nil) } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - t.Logf("ReadFrame: %v", err) - return nil - } - switch f := f.(type) { - case *DataFrame: - dataLen := len(f.Data()) - if dataLen > 0 { - if err := ct.fr.WriteWindowUpdate(0, uint32(dataLen)); err != nil { - return err - } - if err := 
ct.fr.WriteWindowUpdate(f.StreamID, uint32(dataLen)); err != nil { - return err - } - } - case *RSTStreamFrame: - if f.StreamID == 1 && f.ErrCode == ErrCodeCancel { - return nil - } - } - } + + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + + tc.writeWindowUpdate(0, bodySize) + tc.writeWindowUpdate(rt.streamID(), bodySize) + + if body { + tc.wantData(wantData{ + endStream: true, + size: bodySize, + multiple: true, + }) + } + + tc.advance(4 * time.Millisecond) + if rt.done() { + t.Fatalf("RoundTrip is done after 4ms; want still waiting") + } + tc.advance(1 * time.Millisecond) + + if err := rt.err(); !isTimeout(err) { + t.Fatalf("RoundTrip error: %v; want timeout error", err) } - ct.run() } func TestTransportDisableCompression(t *testing.T) { const body = "sup" - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { want := http.Header{ "User-Agent": []string{"Go-http-client/2.0"}, } if !reflect.DeepEqual(r.Header, want) { t.Errorf("request headers = %v; want %v", r.Header, want) } - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{ TLSClientConfig: tlsConfigInsecure, @@ -2295,7 +1905,7 @@ func TestTransportDisableCompression(t *testing.T) { } defer tr.CloseIdleConnections() - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } @@ -2308,15 +1918,14 @@ func TestTransportDisableCompression(t *testing.T) { // RFC 7540 section 8.1.2.2 func TestTransportRejectsConnHeaders(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { var got []string for k := range r.Header { got = append(got, k) } sort.Strings(got) w.Header().Set("Got-Header", strings.Join(got, ",")) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -2404,7 +2013,7 @@ func TestTransportRejectsConnHeaders(t *testing.T) { } for _, tt := range tests { - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) req.Header[tt.key] = tt.value res, err := tr.RoundTrip(req) var got string @@ -2458,14 +2067,13 @@ func TestTransportRejectsContentLengthWithSign(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Length", tt.cl[0]) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, _ := http.NewRequest("HEAD", st.ts.URL, nil) + req, _ := http.NewRequest("HEAD", ts.URL, nil) res, err := tr.RoundTrip(req) var got string @@ -2484,19 +2092,20 @@ func TestTransportRejectsContentLengthWithSign(t *testing.T) { } // golang.org/issue/14048 -func TestTransportFailsOnInvalidHeaders(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { +// golang.org/issue/64766 +func TestTransportFailsOnInvalidHeadersAndTrailers(t *testing.T) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { var got []string for k := range r.Header { got = append(got, k) } sort.Strings(got) w.Header().Set("Got-Header", strings.Join(got, ",")) - }, optOnlyServer) - defer st.Close() + }) tests := [...]struct { h http.Header + t 
http.Header wantErr string }{ 0: { @@ -2515,14 +2124,23 @@ func TestTransportFailsOnInvalidHeaders(t *testing.T) { h: http.Header{"foo": {"foo\x01bar"}}, wantErr: `invalid HTTP header value for header "foo"`, }, + 4: { + t: http.Header{"foo": {"foo\x01bar"}}, + wantErr: `invalid HTTP trailer value for header "foo"`, + }, + 5: { + t: http.Header{"x-\r\nda": {"foo\x01bar"}}, + wantErr: `invalid HTTP trailer name "x-\r\nda"`, + }, } tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() for i, tt := range tests { - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) req.Header = tt.h + req.Trailer = tt.t res, err := tr.RoundTrip(req) var bad bool if tt.wantErr == "" { @@ -2549,7 +2167,7 @@ func TestTransportFailsOnInvalidHeaders(t *testing.T) { // the first Read call's gzip.NewReader returning an error. func TestGzipReader_DoubleReadCrash(t *testing.T) { gz := &gzipReader{ - body: ioutil.NopCloser(strings.NewReader("0123456789")), + body: io.NopCloser(strings.NewReader("0123456789")), } var buf [1]byte n, err1 := gz.Read(buf[:]) @@ -2568,7 +2186,7 @@ func TestGzipReader_ReadAfterClose(t *testing.T) { w.Write([]byte("012345679")) w.Close() gz := &gzipReader{ - body: ioutil.NopCloser(&body), + body: io.NopCloser(&body), } var buf [1]byte n, err := gz.Read(buf[:]) @@ -2658,115 +2276,61 @@ func TestTransportNewTLSConfig(t *testing.T) { // without END_STREAM, followed by a 0-length DATA frame with // END_STREAM. Make sure we don't get confused by that. (We did.) func TestTransportReadHeadResponse(t *testing.T) { - ct := newClientTester(t) - clientDone := make(chan struct{}) - ct.client = func() error { - defer close(clientDone) - req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err - } - if res.ContentLength != 123 { - return fmt.Errorf("Content-Length = %d; want 123", res.ContentLength) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("ReadAll: %v", err) - } - if len(slurp) > 0 { - return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) - } - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - t.Logf("ReadFrame: %v", err) - return nil - } - hf, ok := f.(*HeadersFrame) - if !ok { - continue - } - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, // as the GFE does - BlockFragment: buf.Bytes(), - }) - ct.fr.WriteData(hf.StreamID, true, nil) + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, // as the GFE does + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + "content-length", "123", + ), + }) + tc.writeData(rt.streamID(), true, nil) - <-clientDone - return nil - } + res := rt.response() + if res.ContentLength != 123 { + t.Fatalf("Content-Length = %d; want 123", res.ContentLength) } - ct.run() + rt.wantBody(nil) } func TestTransportReadHeadResponseWithBody(t *testing.T) { - // This test use not valid response format. 
- // Discarding logger output to not spam tests output. - log.SetOutput(ioutil.Discard) + // This test uses an invalid response format. + // Discard logger output to not spam tests output. + log.SetOutput(io.Discard) defer log.SetOutput(os.Stderr) response := "redirecting to /elsewhere" - ct := newClientTester(t) - clientDone := make(chan struct{}) - ct.client = func() error { - defer close(clientDone) - req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err - } - if res.ContentLength != int64(len(response)) { - return fmt.Errorf("Content-Length = %d; want %d", res.ContentLength, len(response)) - } - slurp, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("ReadAll: %v", err) - } - if len(slurp) > 0 { - return fmt.Errorf("Unexpected non-empty ReadAll body: %q", slurp) - } - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - t.Logf("ReadFrame: %v", err) - return nil - } - hf, ok := f.(*HeadersFrame) - if !ok { - continue - } - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: strconv.Itoa(len(response))}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - ct.fr.WriteData(hf.StreamID, true, []byte(response)) + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("HEAD", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + "content-length", strconv.Itoa(len(response)), + ), + }) + tc.writeData(rt.streamID(), true, []byte(response)) - <-clientDone - return nil - } + res := rt.response() + if res.ContentLength != int64(len(response)) { + t.Fatalf("Content-Length = %d; want %d", res.ContentLength, len(response)) } - ct.run() + rt.wantBody(nil) } type neverEnding byte @@ -2784,11 +2348,10 @@ func (b neverEnding) Read(p []byte) (int, error) { // runs out of flow control tokens) func TestTransportHandlerBodyClose(t *testing.T) { const bodySize = 10 << 20 - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { r.Body.Close() io.Copy(w, io.LimitReader(neverEnding('A'), bodySize)) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -2797,7 +2360,7 @@ func TestTransportHandlerBodyClose(t *testing.T) { const numReq = 10 for i := 0; i < numReq; i++ { - req, err := http.NewRequest("POST", st.ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) + req, err := http.NewRequest("POST", ts.URL, struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) if err != nil { t.Fatal(err) } @@ -2805,7 +2368,7 @@ func TestTransportHandlerBodyClose(t *testing.T) { if err != nil { t.Fatal(err) } - n, err := io.Copy(ioutil.Discard, res.Body) + n, err := io.Copy(io.Discard, res.Body) res.Body.Close() if n != bodySize || err != nil { t.Fatalf("req#%d: Copy = %d, %v; want %d, nil", i, n, err, bodySize) @@ -2830,7 +2393,7 @@ func TestTransportFlowControl(t *testing.T) { } var wrote int64 // updated atomically - st := newServerTester(t, 
func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { b := make([]byte, bufLen) for wrote < total { n, err := w.Write(b) @@ -2841,11 +2404,11 @@ func TestTransportFlowControl(t *testing.T) { } w.(http.Flusher).Flush() } - }, optOnlyServer) + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal("NewRequest error:", err) } @@ -2891,190 +2454,128 @@ func TestTransportUsesGoAwayDebugError_Body(t *testing.T) { } func testTransportUsesGoAwayDebugError(t *testing.T, failMidBody bool) { - ct := newClientTester(t) - clientDone := make(chan struct{}) + tc := newTestClientConn(t) + tc.greet() const goAwayErrCode = ErrCodeHTTP11Required // arbitrary const goAwayDebugData = "some debug data" - ct.client = func() error { - defer close(clientDone) - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if failMidBody { - if err != nil { - return fmt.Errorf("unexpected client RoundTrip error: %v", err) - } - _, err = io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - } - want := GoAwayError{ - LastStreamID: 5, - ErrCode: goAwayErrCode, - DebugData: goAwayDebugData, - } - if !reflect.DeepEqual(err, want) { - t.Errorf("RoundTrip error = %T: %#v, want %T (%#v)", err, err, want, want) - } - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - t.Logf("ReadFrame: %v", err) - return nil - } - hf, ok := f.(*HeadersFrame) - if !ok { - continue - } - if failMidBody { - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "123"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - } - // Write two GOAWAY frames, to test that the Transport takes - // the interesting parts of both. - ct.fr.WriteGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) - ct.fr.WriteGoAway(5, goAwayErrCode, nil) - ct.sc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - ct.sc.(*net.TCPConn).Close() - } - <-clientDone - return nil - } + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + + if failMidBody { + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + "content-length", "123", + ), + }) } - ct.run() -} -func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) { - ct := newClientTester(t) + // Write two GOAWAY frames, to test that the Transport takes + // the interesting parts of both. 
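A hedged aside on what "the interesting parts of both" means for the two GOAWAY frames written just below: judging from the GoAwayError the test expects, the debug data is kept from the first frame that carries any, and a real error code takes precedence over ErrCodeNo. The helper below is an editor's illustration of that merge, not the Transport's actual code:

// mergeGoAway sketches the merge the test below relies on (illustrative only).
func mergeGoAway(prev, next GoAwayError) GoAwayError {
	merged := next
	if merged.DebugData == "" {
		merged.DebugData = prev.DebugData // keep the first non-empty debug data
	}
	if merged.ErrCode == ErrCodeNo {
		merged.ErrCode = prev.ErrCode // keep a real error code over NO_ERROR
	}
	return merged
}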
+ tc.writeGoAway(5, ErrCodeNo, []byte(goAwayDebugData)) + tc.writeGoAway(5, goAwayErrCode, nil) + tc.closeWrite() - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) + res, err := rt.result() + whence := "RoundTrip" + if failMidBody { + whence = "Body.Read" if err != nil { - return err - } - - if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 { - return fmt.Errorf("body read = %v, %v; want 1, nil", n, err) + t.Fatalf("RoundTrip error = %v, want success", err) } - res.Body.Close() // leaving 4999 bytes unread + _, err = res.Body.Read(make([]byte, 1)) + } - return nil + want := GoAwayError{ + LastStreamID: 5, + ErrCode: goAwayErrCode, + DebugData: goAwayDebugData, } - ct.server = func() error { - ct.greet() + if !reflect.DeepEqual(err, want) { + t.Errorf("%v error = %T: %#v, want %T (%#v)", whence, err, err, want, want) + } +} - var hf *HeadersFrame - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - continue - } - var ok bool - hf, ok = f.(*HeadersFrame) - if !ok { - return fmt.Errorf("Got %T; want HeadersFrame", f) - } - break - } +func testTransportReturnsUnusedFlowControl(t *testing.T, oneDataFrame bool) { + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + "content-length", "5000", + ), + }) + initialInflow := tc.inflowWindow(0) + + // Two cases: + // - Send one DATA frame with 5000 bytes. + // - Send two DATA frames with 1 and 4999 bytes each. + // + // In both cases, the client should consume one byte of data, + // refund that byte, then refund the following 4999 bytes. + // + // In the second case, the server waits for the client to reset the + // stream before sending the second DATA frame. This tests the case + // where the client receives a DATA frame after it has reset the stream. + const streamNotEnded = false + if oneDataFrame { + tc.writeData(rt.streamID(), streamNotEnded, make([]byte, 5000)) + } else { + tc.writeData(rt.streamID(), streamNotEnded, make([]byte, 1)) + } - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - initialInflow := ct.inflowWindow(0) - - // Two cases: - // - Send one DATA frame with 5000 bytes. - // - Send two DATA frames with 1 and 4999 bytes each. - // - // In both cases, the client should consume one byte of data, - // refund that byte, then refund the following 4999 bytes. - // - // In the second case, the server waits for the client to reset the - // stream before sending the second DATA frame. This tests the case - // where the client receives a DATA frame after it has reset the stream. 
- if oneDataFrame { - ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 5000)) - } else { - ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 1)) - } + res := rt.response() + if n, err := res.Body.Read(make([]byte, 1)); err != nil || n != 1 { + t.Fatalf("body read = %v, %v; want 1, nil", n, err) + } + res.Body.Close() // leaving 4999 bytes unread + tc.sync() - wantRST := true - wantWUF := true - if !oneDataFrame { - wantWUF = false // flow control update is small, and will not be sent - } - for wantRST || wantWUF { - f, err := ct.readNonSettingsFrame() - if err != nil { - return err + sentAdditionalData := false + tc.wantUnorderedFrames( + func(f *RSTStreamFrame) bool { + if f.ErrCode != ErrCodeCancel { + t.Fatalf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f)) } - switch f := f.(type) { - case *RSTStreamFrame: - if !wantRST { - return fmt.Errorf("Unexpected frame: %v", summarizeFrame(f)) - } - if f.ErrCode != ErrCodeCancel { - return fmt.Errorf("Expected a RSTStreamFrame with code cancel; got %v", summarizeFrame(f)) - } - wantRST = false - case *WindowUpdateFrame: - if !wantWUF { - return fmt.Errorf("Unexpected frame: %v", summarizeFrame(f)) - } - if f.Increment != 5000 { - return fmt.Errorf("Expected WindowUpdateFrames for 5000 bytes; got %v", summarizeFrame(f)) - } - wantWUF = false - default: - return fmt.Errorf("Unexpected frame: %v", summarizeFrame(f)) + if !oneDataFrame { + // Send the remaining data now. + tc.writeData(rt.streamID(), streamNotEnded, make([]byte, 4999)) + sentAdditionalData = true } - } - if !oneDataFrame { - ct.fr.WriteData(hf.StreamID, false /* don't end stream */, make([]byte, 4999)) - f, err := ct.readNonSettingsFrame() - if err != nil { - return err + return true + }, + func(f *PingFrame) bool { + return true + }, + func(f *WindowUpdateFrame) bool { + if !oneDataFrame && !sentAdditionalData { + t.Fatalf("Got WindowUpdateFrame, don't expect one yet") } - wuf, ok := f.(*WindowUpdateFrame) - if !ok || wuf.Increment != 5000 { - return fmt.Errorf("want WindowUpdateFrame for 5000 bytes; got %v", summarizeFrame(f)) + if f.Increment != 5000 { + t.Fatalf("Expected WindowUpdateFrames for 5000 bytes; got %v", summarizeFrame(f)) } - } - if err := ct.writeReadPing(); err != nil { - return err - } - if got, want := ct.inflowWindow(0), initialInflow; got != want { - return fmt.Errorf("connection flow tokens = %v, want %v", got, want) - } - return nil + return true + }, + ) + + if got, want := tc.inflowWindow(0), initialInflow; got != want { + t.Fatalf("connection flow tokens = %v, want %v", got, want) } - ct.run() } // See golang.org/issue/16481 @@ -3090,199 +2591,124 @@ func TestTransportReturnsUnusedFlowControlMultipleWrites(t *testing.T) { // Issue 16612: adjust flow control on open streams when transport // receives SETTINGS with INITIAL_WINDOW_SIZE from server. func TestTransportAdjustsFlowControl(t *testing.T) { - ct := newClientTester(t) - clientDone := make(chan struct{}) - const bodySize = 1 << 20 - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - defer ct.cc.(*net.TCPConn).Close() - } - defer close(clientDone) + tc := newTestClientConn(t) + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + // Don't write our SETTINGS yet. 
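TestTransportAdjustsFlowControl above exercises the rule from RFC 7540, section 6.9.2: when the peer changes SETTINGS_INITIAL_WINDOW_SIZE, the send window of every open stream shifts by the difference between the new and old values, and can even go negative. A minimal sketch of that arithmetic follows; it is not the Transport's actual bookkeeping, and the function and map are illustrative:

// adjustStreamSendWindows applies an INITIAL_WINDOW_SIZE change to a set of
// per-stream send windows (RFC 7540, section 6.9.2). A window may become
// negative, in which case the sender must wait for WINDOW_UPDATE frames
// before writing more DATA on that stream.
func adjustStreamSendWindows(windows map[uint32]int64, oldInitial, newInitial uint32) {
	delta := int64(newInitial) - int64(oldInitial)
	for id := range windows {
		windows[id] += delta
	}
}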
- req, _ := http.NewRequest("POST", "https://dummy.tld/", struct{ io.Reader }{io.LimitReader(neverEnding('A'), bodySize)}) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err + body := tc.newRequestBody() + body.writeBytes(bodySize) + body.closeWithError(io.EOF) + + req, _ := http.NewRequest("POST", "https://dummy.tld/", body) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + + gotBytes := int64(0) + for { + f := readFrame[*DataFrame](t, tc) + gotBytes += int64(len(f.Data())) + // After we've got half the client's initial flow control window's worth + // of request body data, give it just enough flow control to finish. + if gotBytes >= initialWindowSize/2 { + break } - res.Body.Close() - return nil } - ct.server = func() error { - _, err := io.ReadFull(ct.sc, make([]byte, len(ClientPreface))) - if err != nil { - return fmt.Errorf("reading client preface: %v", err) - } - var gotBytes int64 - var sentSettings bool - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - return nil - default: - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - } - switch f := f.(type) { - case *DataFrame: - gotBytes += int64(len(f.Data())) - // After we've got half the client's - // initial flow control window's worth - // of request body data, give it just - // enough flow control to finish. - if gotBytes >= initialWindowSize/2 && !sentSettings { - sentSettings = true - - ct.fr.WriteSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) - ct.fr.WriteWindowUpdate(0, bodySize) - ct.fr.WriteSettingsAck() - } + tc.writeSettings(Setting{ID: SettingInitialWindowSize, Val: bodySize}) + tc.writeWindowUpdate(0, bodySize) + tc.writeSettingsAck() - if f.StreamEnded() { - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - } - } - } + tc.wantUnorderedFrames( + func(f *SettingsFrame) bool { return true }, + func(f *DataFrame) bool { + gotBytes += int64(len(f.Data())) + return f.StreamEnded() + }, + ) + + if gotBytes != bodySize { + t.Fatalf("server received %v bytes of body, want %v", gotBytes, bodySize) } - ct.run() + + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt.wantStatus(200) } // See golang.org/issue/16556 func TestTransportReturnsDataPaddingFlowControl(t *testing.T) { - ct := newClientTester(t) - - unblockClient := make(chan bool, 1) - - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return err - } - defer res.Body.Close() - <-unblockClient - return nil - } - ct.server = func() error { - ct.greet() + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + "content-length", "5000", + ), + }) - var hf *HeadersFrame - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - continue - } - 
var ok bool - hf, ok = f.(*HeadersFrame) - if !ok { - return fmt.Errorf("Got %T; want HeadersFrame", f) - } - break - } + initialConnWindow := tc.inflowWindow(0) + initialStreamWindow := tc.inflowWindow(rt.streamID()) - initialConnWindow := ct.inflowWindow(0) + pad := make([]byte, 5) + tc.writeDataPadded(rt.streamID(), false, make([]byte, 5000), pad) - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "content-length", Value: "5000"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - initialStreamWindow := ct.inflowWindow(hf.StreamID) - pad := make([]byte, 5) - ct.fr.WriteDataPadded(hf.StreamID, false, make([]byte, 5000), pad) // without ending stream - if err := ct.writeReadPing(); err != nil { - return err - } - // Padding flow control should have been returned. - if got, want := ct.inflowWindow(0), initialConnWindow-5000; got != want { - t.Errorf("conn inflow window = %v, want %v", got, want) - } - if got, want := ct.inflowWindow(hf.StreamID), initialStreamWindow-5000; got != want { - t.Errorf("stream inflow window = %v, want %v", got, want) - } - unblockClient <- true - return nil + // Padding flow control should have been returned. + if got, want := tc.inflowWindow(0), initialConnWindow-5000; got != want { + t.Errorf("conn inflow window = %v, want %v", got, want) + } + if got, want := tc.inflowWindow(rt.streamID()), initialStreamWindow-5000; got != want { + t.Errorf("stream inflow window = %v, want %v", got, want) } - ct.run() } // golang.org/issue/16572 -- RoundTrip shouldn't hang when it gets a // StreamError as a result of the response HEADERS func TestTransportReturnsErrorOnBadResponseHeaders(t *testing.T) { - ct := newClientTester(t) + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + " content-type", "bogus", + ), + }) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err == nil { - res.Body.Close() - return errors.New("unexpected successful GET") - } - want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} - if !reflect.DeepEqual(want, err) { - t.Errorf("RoundTrip error = %#v; want %#v", err, want) - } - return nil + err := rt.err() + want := StreamError{1, ErrCodeProtocol, headerFieldNameError(" content-type")} + if !reflect.DeepEqual(err, want) { + t.Fatalf("RoundTrip error = %#v; want %#v", err, want) } - ct.server = func() error { - ct.greet() - - hf, err := ct.firstHeaders() - if err != nil { - return err - } - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: " content-type", Value: "bogus"}) // bogus spaces - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - - for { - fr, err := ct.readFrame() - if err != nil { - return fmt.Errorf("error waiting for RST_STREAM from client: %v", err) - } - if _, ok := fr.(*SettingsFrame); ok { - continue - } - if rst, ok := fr.(*RSTStreamFrame); !ok || 
rst.StreamID != 1 || rst.ErrCode != ErrCodeProtocol { - t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) - } - break - } - return nil + fr := readFrame[*RSTStreamFrame](t, tc) + if fr.StreamID != 1 || fr.ErrCode != ErrCodeProtocol { + t.Errorf("Frame = %v; want RST_STREAM for stream 1 with ErrCodeProtocol", summarizeFrame(fr)) } - ct.run() } // byteAndEOFReader returns is in an io.Reader which reads one byte @@ -3307,16 +2733,15 @@ func (b byteAndEOFReader) Read(p []byte) (n int, err error) { // which returns (non-0, io.EOF) and also needs to set the ContentLength // explicitly. func TestTransportBodyDoubleEndStream(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { // Nothing. - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() for i := 0; i < 2; i++ { - req, _ := http.NewRequest("POST", st.ts.URL, byteAndEOFReader('a')) + req, _ := http.NewRequest("POST", ts.URL, byteAndEOFReader('a')) req.ContentLength = 1 res, err := tr.RoundTrip(req) if err != nil { @@ -3459,16 +2884,17 @@ func TestTransportRequestPathPseudo(t *testing.T) { // before we've determined that the ClientConn is usable. func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { const body = "foo" - req, _ := http.NewRequest("POST", "http://foo.com/", ioutil.NopCloser(strings.NewReader(body))) + req, _ := http.NewRequest("POST", "http://foo.com/", io.NopCloser(strings.NewReader(body))) cc := &ClientConn{ closed: true, reqHeaderMu: make(chan struct{}, 1), + t: &Transport{}, } _, err := cc.RoundTrip(req) if err != errClientConnUnusable { t.Fatalf("RoundTrip = %v; want errClientConnUnusable", err) } - slurp, err := ioutil.ReadAll(req.Body) + slurp, err := io.ReadAll(req.Body) if err != nil { t.Errorf("ReadAll = %v", err) } @@ -3478,12 +2904,11 @@ func TestRoundTripDoesntConsumeRequestBodyEarly(t *testing.T) { } func TestClientConnPing(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) - defer st.Close() + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() ctx := context.Background() - cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false) + cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false) if err != nil { t.Fatal(err) } @@ -3501,7 +2926,7 @@ func TestTransportCancelDataResponseRace(t *testing.T) { clientGotResponse := make(chan bool, 1) const msg = "Hello." 
- st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/hello") { time.Sleep(50 * time.Millisecond) io.WriteString(w, msg) @@ -3516,29 +2941,28 @@ func TestTransportCancelDataResponseRace(t *testing.T) { } time.Sleep(10 * time.Millisecond) } - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) req.Cancel = cancel res, err := c.Do(req) clientGotResponse <- true if err != nil { t.Fatal(err) } - if _, err = io.Copy(ioutil.Discard, res.Body); err == nil { + if _, err = io.Copy(io.Discard, res.Body); err == nil { t.Fatal("unexpected success") } - res, err = c.Get(st.ts.URL + "/hello") + res, err = c.Get(ts.URL + "/hello") if err != nil { t.Fatal(err) } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err != nil { t.Fatal(err) } @@ -3550,21 +2974,20 @@ func TestTransportCancelDataResponseRace(t *testing.T) { // Issue 21316: It should be safe to reuse an http.Request after the // request has completed. func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) io.WriteString(w, "body") - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) resp, err := tr.RoundTrip(req) if err != nil { t.Fatal(err) } - if _, err = io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err = io.Copy(io.Discard, resp.Body); err != nil { t.Fatalf("error reading response body: %v", err) } if err := resp.Body.Close(); err != nil { @@ -3576,34 +2999,30 @@ func TestTransportNoRaceOnRequestObjectAfterRequestComplete(t *testing.T) { } func TestTransportCloseAfterLostPing(t *testing.T) { - clientDone := make(chan struct{}) - ct := newClientTester(t) - ct.tr.PingTimeout = 1 * time.Second - ct.tr.ReadIdleTimeout = 1 * time.Second - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - defer close(clientDone) - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - _, err := ct.tr.RoundTrip(req) - if err == nil || !strings.Contains(err.Error(), "client connection lost") { - return fmt.Errorf("expected to get error about \"connection lost\", got %v", err) - } - return nil - } - ct.server = func() error { - ct.greet() - <-clientDone - return nil + tc := newTestClientConn(t, func(tr *Transport) { + tr.PingTimeout = 1 * time.Second + tr.ReadIdleTimeout = 1 * time.Second + }) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + tc.wantFrameType(FrameHeaders) + + tc.advance(1 * time.Second) + tc.wantFrameType(FramePing) + + tc.advance(1 * time.Second) + err := rt.err() + if err == nil || !strings.Contains(err.Error(), "client connection lost") { + t.Fatalf("expected to get error about \"connection lost\", got %v", err) } - ct.run() } func TestTransportPingWriteBlocks(t *testing.T) { - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}, - optOnlyServer, ) - defer st.Close() tr := &Transport{ 
TLSClientConfig: tlsConfigInsecure, DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { @@ -3622,424 +3041,327 @@ func TestTransportPingWriteBlocks(t *testing.T) { } defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} - _, err := c.Get(st.ts.URL) + _, err := c.Get(ts.URL) if err == nil { t.Fatalf("Get = nil, want error") } } -func TestTransportPingWhenReading(t *testing.T) { - testCases := []struct { - name string - readIdleTimeout time.Duration - deadline time.Duration - expectedPingCount int - }{ - { - name: "two pings", - readIdleTimeout: 100 * time.Millisecond, - deadline: time.Second, - expectedPingCount: 2, - }, - { - name: "zero ping", - readIdleTimeout: time.Second, - deadline: 200 * time.Millisecond, - expectedPingCount: 0, - }, - { - name: "0 readIdleTimeout means no ping", - readIdleTimeout: 0 * time.Millisecond, - deadline: 500 * time.Millisecond, - expectedPingCount: 0, - }, - } - - for _, tc := range testCases { - tc := tc // capture range variable - t.Run(tc.name, func(t *testing.T) { - testTransportPingWhenReading(t, tc.readIdleTimeout, tc.deadline, tc.expectedPingCount) - }) - } -} +func TestTransportPingWhenReadingMultiplePings(t *testing.T) { + tc := newTestClientConn(t, func(tr *Transport) { + tr.ReadIdleTimeout = 1000 * time.Millisecond + }) + tc.greet() -func testTransportPingWhenReading(t *testing.T, readIdleTimeout, deadline time.Duration, expectedPingCount int) { - var pingCount int - ct := newClientTester(t) - ct.tr.ReadIdleTimeout = readIdleTimeout + ctx, cancel := context.WithCancel(context.Background()) + req, _ := http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) - ctx, cancel := context.WithTimeout(context.Background(), deadline) - defer cancel() - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - defer ct.cc.(*net.TCPConn).Close() - } - req, _ := http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip: %v", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return fmt.Errorf("status code = %v; want %v", res.StatusCode, 200) - } - _, err = ioutil.ReadAll(res.Body) - if expectedPingCount == 0 && errors.Is(ctx.Err(), context.DeadlineExceeded) { - return nil + for i := 0; i < 5; i++ { + // No ping yet... + tc.advance(999 * time.Millisecond) + if f := tc.readFrame(); f != nil { + t.Fatalf("unexpected frame: %v", f) } - cancel() - return err + // ...ping now. + tc.advance(1 * time.Millisecond) + f := readFrame[*PingFrame](t, tc) + tc.writePing(true, f.Data) } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - var streamID uint32 - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-ctx.Done(): - // If the client's done, it - // will have reported any - // errors on its side. 
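The ping tests being rewritten here pin down the Transport's connection health check. As a hedged usage sketch (the timeout values are arbitrary and not taken from this patch): with a non-zero ReadIdleTimeout the Transport sends a PING after that long without reading any frame, and treats the connection as lost if the PING is not answered within PingTimeout; a zero ReadIdleTimeout disables the check entirely.

// newPingingTransport assumes it lives in this package and that "time" is
// imported; it only shows how the two knobs fit together.
func newPingingTransport() *Transport {
	return &Transport{
		ReadIdleTimeout: 30 * time.Second, // probe after 30s of read inactivity
		PingTimeout:     15 * time.Second, // drop the conn if the PING isn't acked within 15s
	}
}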
- return nil - default: - return err - } - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(200)}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - streamID = f.StreamID - case *PingFrame: - pingCount++ - if pingCount == expectedPingCount { - if err := ct.fr.WriteData(streamID, true, []byte("hello, this is last server data frame")); err != nil { - return err - } - } - if err := ct.fr.WritePing(true, f.Data); err != nil { - return err - } - case *RSTStreamFrame: - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } + // Cancel the request, Transport resets it and returns an error from body reads. + cancel() + tc.sync() + + tc.wantFrameType(FrameRSTStream) + _, err := rt.readBody() + if err == nil { + t.Fatalf("Response.Body.Read() = %v, want error", err) } - ct.run() } -func testClientMultipleDials(t *testing.T, client func(*Transport), server func(int, *clientTester)) { - ln := newLocalListener(t) - defer ln.Close() +func TestTransportPingWhenReadingPingDisabled(t *testing.T) { + tc := newTestClientConn(t, func(tr *Transport) { + tr.ReadIdleTimeout = 0 // PINGs disabled + }) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) - var ( - mu sync.Mutex - count int - conns []net.Conn - ) - var wg sync.WaitGroup - tr := &Transport{ - TLSClientConfig: tlsConfigInsecure, - } - tr.DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { - mu.Lock() - defer mu.Unlock() - count++ - cc, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - return nil, fmt.Errorf("dial error: %v", err) - } - conns = append(conns, cc) - sc, err := ln.Accept() - if err != nil { - return nil, fmt.Errorf("accept error: %v", err) - } - conns = append(conns, sc) - ct := &clientTester{ - t: t, - tr: tr, - cc: cc, - sc: sc, - fr: NewFramer(sc, sc), - } - wg.Add(1) - go func(count int) { - defer wg.Done() - server(count, ct) - }(count) - return cc, nil + // No PING is sent, even after a long delay. + tc.advance(1 * time.Minute) + if f := tc.readFrame(); f != nil { + t.Fatalf("unexpected frame: %v", f) } +} - client(tr) - tr.CloseIdleConnections() - ln.Close() - for _, c := range conns { - c.Close() +func TestTransportRetryAfterGOAWAYNoRetry(t *testing.T) { + tt := newTestTransport(t) + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tt.roundTrip(req) + + // First attempt: Server sends a GOAWAY with an error and + // a MaxStreamID less than the request ID. + // This probably indicates that there was something wrong with our request, + // so we don't retry it. 
+ tc := tt.getConn() + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc.writeSettings() + tc.writeGoAway(0 /*max id*/, ErrCodeInternal, nil) + if rt.err() == nil { + t.Fatalf("after GOAWAY, RoundTrip is not done, want error") } - wg.Wait() } -func TestTransportRetryAfterGOAWAY(t *testing.T) { - client := func(tr *Transport) { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := tr.RoundTrip(req) - if res != nil { - res.Body.Close() - if got := res.Header.Get("Foo"); got != "bar" { - err = fmt.Errorf("foo header = %q; want bar", got) - } - } - if err != nil { - t.Errorf("RoundTrip: %v", err) - } - } +func TestTransportRetryAfterGOAWAYRetry(t *testing.T) { + tt := newTestTransport(t) - server := func(count int, ct *clientTester) { - switch count { - case 1: - ct.greet() - hf, err := ct.firstHeaders() - if err != nil { - t.Errorf("server1 failed reading HEADERS: %v", err) - return - } - t.Logf("server1 got %v", hf) - if err := ct.fr.WriteGoAway(0 /*max id*/, ErrCodeNo, nil); err != nil { - t.Errorf("server1 failed writing GOAWAY: %v", err) - return - } - case 2: - ct.greet() - hf, err := ct.firstHeaders() - if err != nil { - t.Errorf("server2 failed reading HEADERS: %v", err) - return - } - t.Logf("server2 got %v", hf) - - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "foo", Value: "bar"}) - err = ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - if err != nil { - t.Errorf("server2 failed writing response HEADERS: %v", err) - } - default: - t.Errorf("unexpected number of dials") - return - } - } + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tt.roundTrip(req) - testClientMultipleDials(t, client, server) + // First attempt: Server sends a GOAWAY with ErrCodeNo and + // a MaxStreamID less than the request ID. + // We take the server at its word that nothing has really gone wrong, + // and retry the request. + tc := tt.getConn() + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc.writeSettings() + tc.writeGoAway(0 /*max id*/, ErrCodeNo, nil) + if rt.done() { + t.Fatalf("after GOAWAY, RoundTrip is done; want it to be retrying") + } + + // Second attempt succeeds on a new connection. + tc = tt.getConn() + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc.writeSettings() + tc.writeHeaders(HeadersFrameParam{ + StreamID: 1, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + + rt.wantStatus(200) +} + +func TestTransportRetryAfterGOAWAYSecondRequest(t *testing.T) { + tt := newTestTransport(t) + + // First request succeeds. 
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt1 := tt.roundTrip(req) + tc := tt.getConn() + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc.writeSettings() + tc.wantFrameType(FrameSettings) // Settings ACK + tc.writeHeaders(HeadersFrameParam{ + StreamID: 1, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt1.wantStatus(200) + + // Second request: Server sends a GOAWAY with + // a MaxStreamID less than the request ID. + // The server says it didn't see this request, + // so we retry it on a new connection. + req, _ = http.NewRequest("GET", "https://dummy.tld/", nil) + rt2 := tt.roundTrip(req) + + // Second request, first attempt. + tc.wantHeaders(wantHeader{ + streamID: 3, + endStream: true, + }) + tc.writeSettings() + tc.writeGoAway(1 /*max id*/, ErrCodeProtocol, nil) + if rt2.done() { + t.Fatalf("after GOAWAY, RoundTrip is done; want it to be retrying") + } + + // Second request, second attempt. + tc = tt.getConn() + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc.writeSettings() + tc.writeHeaders(HeadersFrameParam{ + StreamID: 1, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt2.wantStatus(200) } func TestTransportRetryAfterRefusedStream(t *testing.T) { - clientDone := make(chan struct{}) - client := func(tr *Transport) { - defer close(clientDone) - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - resp, err := tr.RoundTrip(req) - if err != nil { - t.Errorf("RoundTrip: %v", err) - return - } - resp.Body.Close() - if resp.StatusCode != 204 { - t.Errorf("Status = %v; want 204", resp.StatusCode) - return - } + tt := newTestTransport(t) + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tt.roundTrip(req) + + // First attempt: Server sends a RST_STREAM. + tc := tt.getConn() + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + tc.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc.writeSettings() + tc.wantFrameType(FrameSettings) // settings ACK + tc.writeRSTStream(1, ErrCodeRefusedStream) + if rt.done() { + t.Fatalf("after RST_STREAM, RoundTrip is done; want it to be retrying") } - server := func(_ int, ct *clientTester) { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - var count int - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it - // will have reported any - // errors on its side. - default: - t.Error(err) - } - return - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - if !f.HeadersEnded() { - t.Errorf("headers should have END_HEADERS be ended: %v", f) - return - } - count++ - if count == 1 { - ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) - } else { - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - } - default: - t.Errorf("Unexpected client frame %v", f) - return - } - } - } + // Second attempt succeeds on the same connection. 
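Taken together, the three GOAWAY tests above describe when a request aborted by a GOAWAY may be retried on a new connection. A rough sketch of that policy (an illustration of the behavior the tests check, not the package's actual code; ErrCode and ErrCodeNo are the package's existing names):

// shouldRetryAfterGoAway reports whether a request on streamID may be retried
// on a new connection after a GOAWAY carrying lastStreamID and code.
func shouldRetryAfterGoAway(streamID, lastStreamID uint32, code ErrCode) bool {
	if streamID <= lastStreamID {
		// The server saw this stream and may still process it;
		// retrying could duplicate work.
		return false
	}
	if streamID == 1 && code != ErrCodeNo {
		// The very first request on a fresh connection failed with an
		// error, so a new connection is unlikely to do any better.
		return false
	}
	// The server never processed this stream; it is safe to retry.
	return true
}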
+ tc.wantHeaders(wantHeader{ + streamID: 3, + endStream: true, + }) + tc.writeSettings() + tc.writeHeaders(HeadersFrameParam{ + StreamID: 3, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "204", + ), + }) - testClientMultipleDials(t, client, server) + rt.wantStatus(204) } func TestTransportRetryHasLimit(t *testing.T) { - // Skip in short mode because the total expected delay is 1s+2s+4s+8s+16s=29s. - if testing.Short() { - t.Skip("skipping long test in short mode") - } - retryBackoffHook = func(d time.Duration) *time.Timer { - return time.NewTimer(0) // fires immediately - } - defer func() { - retryBackoffHook = nil - }() - clientDone := make(chan struct{}) - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - defer ct.cc.(*net.TCPConn).Close() - } - defer close(clientDone) - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - resp, err := ct.tr.RoundTrip(req) - if err == nil { - return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) + tt := newTestTransport(t) + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tt.roundTrip(req) + + // The server refuses every attempt with RST_STREAM (REFUSED_STREAM). + tc := tt.getConn() + tc.wantFrameType(FrameSettings) + tc.wantFrameType(FrameWindowUpdate) + + var totalDelay time.Duration + count := 0 + for streamID := uint32(1); ; streamID += 2 { + count++ + tc.wantHeaders(wantHeader{ + streamID: streamID, + endStream: true, + }) + if streamID == 1 { + tc.writeSettings() + tc.wantFrameType(FrameSettings) // settings ACK } - t.Logf("expected error, got: %v", err) - return nil - } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it - // will have reported any - // errors on its side. - return nil - default: - return err - } - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) - default: - return fmt.Errorf("Unexpected client frame %v", f) + tc.writeRSTStream(streamID, ErrCodeRefusedStream) + + d, scheduled := tt.group.TimeUntilEvent() + if !scheduled { + if streamID == 1 { + continue } + break } + totalDelay += d + if totalDelay > 5*time.Minute { + t.Fatalf("RoundTrip still retrying after %v, should have given up", totalDelay) + } + tt.advance(d) + } + if got, want := count, 5; got < want { + t.Errorf("RoundTrip made %v attempts, want at least %v", got, want) + } + if rt.err() == nil { + t.Errorf("RoundTrip succeeded, want error") } - ct.run() } func TestTransportResponseDataBeforeHeaders(t *testing.T) { - // This test use not valid response format. - // Discarding logger output to not spam tests output. - log.SetOutput(ioutil.Discard) - defer log.SetOutput(os.Stderr) + // Discard log output complaining about protocol error. + log.SetOutput(io.Discard) + t.Cleanup(func() { log.SetOutput(os.Stderr) }) // after other cleanup is done + + tc := newTestClientConn(t) + tc.greet() + + // First request is normal to ensure the check is per stream and not per connection.
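TestTransportRetryHasLimit above replaces the wall-clock wait of the old test with synthetic time, but the schedule it measures is still the roughly doubling backoff the deleted comment refers to (1s, 2s, 4s, 8s, 16s before the transport gives up). A small self-contained sketch of that schedule, ignoring any jitter the real implementation may add:

package main

import (
	"fmt"
	"time"
)

// retryBackoff returns the delay before retry attempt n (n >= 1):
// 1s, 2s, 4s, 8s, 16s, ...
func retryBackoff(n uint) time.Duration {
	return time.Duration(1<<(n-1)) * time.Second
}

func main() {
	for n := uint(1); n <= 5; n++ {
		fmt.Println(retryBackoff(n)) // 1s 2s 4s 8s 16s
	}
}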
+ req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt1 := tc.roundTrip(req) + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt1.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt1.wantStatus(200) - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - defer ct.cc.(*net.TCPConn).Close() - } - req := httptest.NewRequest("GET", "https://dummy.tld/", nil) - // First request is normal to ensure the check is per stream and not per connection. - _, err := ct.tr.RoundTrip(req) - if err != nil { - return fmt.Errorf("RoundTrip expected no error, got: %v", err) - } - // Second request returns a DATA frame with no HEADERS. - resp, err := ct.tr.RoundTrip(req) - if err == nil { - return fmt.Errorf("RoundTrip expected error, got response: %+v", resp) - } - if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol { - return fmt.Errorf("expected stream PROTOCOL_ERROR, got: %v", err) - } - return nil + // Second request returns a DATA frame with no HEADERS. + rt2 := tc.roundTrip(req) + tc.wantFrameType(FrameHeaders) + tc.writeData(rt2.streamID(), true, []byte("payload")) + if err, ok := rt2.err().(StreamError); !ok || err.Code != ErrCodeProtocol { + t.Fatalf("expected stream PROTOCOL_ERROR, got: %v", err) } - ct.server = func() error { - ct.greet() - for { - f, err := ct.fr.ReadFrame() - if err == io.EOF { - return nil - } else if err != nil { - return err - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame, *RSTStreamFrame: - case *HeadersFrame: - switch f.StreamID { - case 1: - // Send a valid response to first request. 
- var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - case 3: - ct.fr.WriteData(f.StreamID, true, []byte("payload")) - } - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } - } - ct.run() } func TestTransportMaxFrameReadSize(t *testing.T) { @@ -4053,39 +3375,27 @@ func TestTransportMaxFrameReadSize(t *testing.T) { maxReadFrameSize: 1024, want: minMaxFrameSize, }} { - ct := newClientTester(t) - ct.tr.MaxReadFrameSize = test.maxReadFrameSize - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", http.NoBody) - ct.tr.RoundTrip(req) - return nil - } - ct.server = func() error { - defer ct.cc.(*net.TCPConn).Close() - ct.greet() - var got uint32 - ct.settings.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - got = s.Val - } - return nil + t.Run(fmt.Sprint(test.maxReadFrameSize), func(t *testing.T) { + tc := newTestClientConn(t, func(tr *Transport) { + tr.MaxReadFrameSize = test.maxReadFrameSize }) - if got != test.want { + + fr := readFrame[*SettingsFrame](t, tc) + got, ok := fr.Value(SettingMaxFrameSize) + if !ok { + t.Errorf("Transport.MaxReadFrameSize = %v; server got no setting, want %v", test.maxReadFrameSize, test.want) + } else if got != test.want { t.Errorf("Transport.MaxReadFrameSize = %v; server got %v, want %v", test.maxReadFrameSize, got, test.want) } - return nil - } - ct.run() + }) } } func TestTransportRequestsLowServerLimit(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - }, optOnlyServer, func(s *Server) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + }, func(s *Server) { s.MaxConcurrentStreams = 1 }) - defer st.Close() var ( connCountMu sync.Mutex @@ -4104,7 +3414,7 @@ func TestTransportRequestsLowServerLimit(t *testing.T) { const reqCount = 3 for i := 0; i < reqCount; i++ { - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } @@ -4129,324 +3439,115 @@ func TestTransportRequestsLowServerLimit(t *testing.T) { func TestTransportRequestsStallAtServerLimit(t *testing.T) { const maxConcurrent = 2 - greet := make(chan struct{}) // server sends initial SETTINGS frame - gotRequest := make(chan struct{}) // server received a request - clientDone := make(chan struct{}) - cancelClientRequest := make(chan struct{}) + tc := newTestClientConn(t, func(tr *Transport) { + tr.StrictMaxConcurrentStreams = true + }) + tc.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) - // Collect errors from goroutines. - var wg sync.WaitGroup - errs := make(chan error, 100) - defer func() { - wg.Wait() - close(errs) - for err := range errs { - t.Error(err) - } - }() + cancelClientRequest := make(chan struct{}) - // We will send maxConcurrent+2 requests. This checker goroutine waits for the - // following stages: - // 1. The first maxConcurrent requests are received by the server. - // 2. The client will cancel the next request - // 3. The server is unblocked so it can service the first maxConcurrent requests - // 4. The client will send the final request - wg.Add(1) - unblockClient := make(chan struct{}) - clientRequestCancelled := make(chan struct{}) - unblockServer := make(chan struct{}) - go func() { - defer wg.Done() - // Stage 1. 
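The table driving TestTransportMaxFrameReadSize above reflects the bounds RFC 9113 (Section 6.5.2) places on SETTINGS_MAX_FRAME_SIZE: advertised values must lie between 2^14 (16384) and 2^24-1, so a Transport.MaxReadFrameSize below the floor is raised to the minimum. A sketch of that clamping with hypothetical constant names (the package's own identifiers may differ):

// clampMaxFrameSize returns a value that is legal to advertise as
// SETTINGS_MAX_FRAME_SIZE: 16384 <= v <= 1<<24-1 (RFC 9113, Section 6.5.2).
func clampMaxFrameSize(v uint32) uint32 {
	const (
		minFrameSize = 1 << 14
		maxFrameSize = 1<<24 - 1
	)
	if v < minFrameSize {
		return minFrameSize
	}
	if v > maxFrameSize {
		return maxFrameSize
	}
	return v
}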
- for k := 0; k < maxConcurrent; k++ { - <-gotRequest - } - // Stage 2. - close(unblockClient) - <-clientRequestCancelled - // Stage 3: give some time for the final RoundTrip call to be scheduled and - // verify that the final request is not sent. - time.Sleep(50 * time.Millisecond) - select { - case <-gotRequest: - errs <- errors.New("last request did not stall") - close(unblockServer) - return - default: + // Start maxConcurrent+2 requests. + // The server does not respond to any of them yet. + var rts []*testRoundTrip + for k := 0; k < maxConcurrent+2; k++ { + req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), nil) + if k == maxConcurrent { + req.Cancel = cancelClientRequest + } + rt := tc.roundTrip(req) + rts = append(rts, rt) + + if k < maxConcurrent { + // We are under the stream limit, so the client sends the request. + tc.wantHeaders(wantHeader{ + streamID: rt.streamID(), + endStream: true, + header: http.Header{ + ":authority": []string{"dummy.tld"}, + ":method": []string{"GET"}, + ":path": []string{fmt.Sprintf("/%d", k)}, + }, + }) + } else { + // We have reached the stream limit, + // so the client cannot send the request. + if fr := tc.readFrame(); fr != nil { + t.Fatalf("after making new request while at stream limit, got unexpected frame: %v", fr) + } } - close(unblockServer) - // Stage 4. - <-gotRequest - }() - ct := newClientTester(t) - ct.tr.StrictMaxConcurrentStreams = true - ct.client = func() error { - var wg sync.WaitGroup - defer func() { - wg.Wait() - close(clientDone) - ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - ct.cc.(*net.TCPConn).Close() - } - }() - for k := 0; k < maxConcurrent+2; k++ { - wg.Add(1) - go func(k int) { - defer wg.Done() - // Don't send the second request until after receiving SETTINGS from the server - // to avoid a race where we use the default SettingMaxConcurrentStreams, which - // is much larger than maxConcurrent. We have to send the first request before - // waiting because the first request triggers the dial and greet. - if k > 0 { - <-greet - } - // Block until maxConcurrent requests are sent before sending any more. - if k >= maxConcurrent { - <-unblockClient - } - body := newStaticCloseChecker("") - req, _ := http.NewRequest("GET", fmt.Sprintf("https://dummy.tld/%d", k), body) - if k == maxConcurrent { - // This request will be canceled. - req.Cancel = cancelClientRequest - close(cancelClientRequest) - _, err := ct.tr.RoundTrip(req) - close(clientRequestCancelled) - if err == nil { - errs <- fmt.Errorf("RoundTrip(%d) should have failed due to cancel", k) - return - } - } else { - resp, err := ct.tr.RoundTrip(req) - if err != nil { - errs <- fmt.Errorf("RoundTrip(%d): %v", k, err) - return - } - ioutil.ReadAll(resp.Body) - resp.Body.Close() - if resp.StatusCode != 204 { - errs <- fmt.Errorf("Status = %v; want 204", resp.StatusCode) - return - } - } - if err := body.isClosed(); err != nil { - errs <- fmt.Errorf("RoundTrip(%d): %v", k, err) - } - }(k) + if rt.done() { + t.Fatalf("rt %v done", k) } - return nil } - ct.server = func() error { - var wg sync.WaitGroup - defer wg.Wait() - - ct.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) - - // Server write loop. 
- var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - writeResp := make(chan uint32, maxConcurrent+1) - - wg.Add(1) - go func() { - defer wg.Done() - <-unblockServer - for id := range writeResp { - buf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "204"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: id, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - } - }() + // Cancel the maxConcurrent'th request. + // The request should fail. + close(cancelClientRequest) + tc.sync() + if err := rts[maxConcurrent].err(); err == nil { + t.Fatalf("RoundTrip(%d) should have failed due to cancel, did not", maxConcurrent) + } - // Server read loop. - var nreq int - for { - f, err := ct.fr.ReadFrame() - if err != nil { - select { - case <-clientDone: - // If the client's done, it will have reported any errors on its side. - return nil - default: - return err - } - } - switch f := f.(type) { - case *WindowUpdateFrame: - case *SettingsFrame: - // Wait for the client SETTINGS ack until ending the greet. - close(greet) - case *HeadersFrame: - if !f.HeadersEnded() { - return fmt.Errorf("headers should have END_HEADERS be ended: %v", f) - } - gotRequest <- struct{}{} - nreq++ - writeResp <- f.StreamID - if nreq == maxConcurrent+1 { - close(writeResp) - } - case *DataFrame: - default: - return fmt.Errorf("Unexpected client frame %v", f) - } + // No requests should be complete, except for the canceled one. + for i, rt := range rts { + if i != maxConcurrent && rt.done() { + t.Fatalf("RoundTrip(%d) is done, but should not be", i) } } - ct.run() + // Server responds to a request, unblocking the last one. + tc.writeHeaders(HeadersFrameParam{ + StreamID: rts[0].streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + tc.wantHeaders(wantHeader{ + streamID: rts[maxConcurrent+1].streamID(), + endStream: true, + header: http.Header{ + ":authority": []string{"dummy.tld"}, + ":method": []string{"GET"}, + ":path": []string{fmt.Sprintf("/%d", maxConcurrent+1)}, + }, + }) + rts[0].wantStatus(200) } func TestTransportMaxDecoderHeaderTableSize(t *testing.T) { - ct := newClientTester(t) var reqSize, resSize uint32 = 8192, 16384 - ct.tr.MaxDecoderHeaderTableSize = reqSize - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - cc, err := ct.tr.NewClientConn(ct.cc) - if err != nil { - return err - } - _, err = cc.RoundTrip(req) - if err != nil { - return err - } - if got, want := cc.peerMaxHeaderTableSize, resSize; got != want { - return fmt.Errorf("peerHeaderTableSize = %d, want %d", got, want) - } - return nil + tc := newTestClientConn(t, func(tr *Transport) { + tr.MaxDecoderHeaderTableSize = reqSize + }) + + fr := readFrame[*SettingsFrame](t, tc) + if v, ok := fr.Value(SettingHeaderTableSize); !ok { + t.Fatalf("missing SETTINGS_HEADER_TABLE_SIZE setting") + } else if v != reqSize { + t.Fatalf("received SETTINGS_HEADER_TABLE_SIZE = %d, want %d", v, reqSize) } - ct.server = func() error { - buf := make([]byte, len(ClientPreface)) - _, err := io.ReadFull(ct.sc, buf) - if err != nil { - return fmt.Errorf("reading client preface: %v", err) - } - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - sf, ok := f.(*SettingsFrame) - if !ok { - ct.t.Fatalf("wanted client settings frame; got %v", f) - _ = sf // stash it away? 
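TestTransportRequestsStallAtServerLimit above depends on Transport.StrictMaxConcurrentStreams, which makes the client treat the server's SETTINGS_MAX_CONCURRENT_STREAMS as a hard, connection-wide limit: once it is reached, new RoundTrip calls queue for a free stream instead of dialing another connection. A minimal configuration sketch, assuming the upstream golang.org/x/net/http2 API:

package main

import "golang.org/x/net/http2"

func newStrictTransport() *http2.Transport {
	return &http2.Transport{
		// Respect the server's SETTINGS_MAX_CONCURRENT_STREAMS globally;
		// RoundTrip blocks for a free stream rather than opening a new
		// connection when the limit is reached.
		StrictMaxConcurrentStreams: true,
	}
}

func main() { _ = newStrictTransport() }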
- } - var found bool - err = sf.ForeachSetting(func(s Setting) error { - if s.ID == SettingHeaderTableSize { - found = true - if got, want := s.Val, reqSize; got != want { - return fmt.Errorf("received SETTINGS_HEADER_TABLE_SIZE = %d, want %d", got, want) - } - } - return nil - }) - if err != nil { - return err - } - if !found { - return fmt.Errorf("missing SETTINGS_HEADER_TABLE_SIZE setting") - } - if err := ct.fr.WriteSettings(Setting{SettingHeaderTableSize, resSize}); err != nil { - ct.t.Fatal(err) - } - if err := ct.fr.WriteSettingsAck(); err != nil { - ct.t.Fatal(err) - } - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - return nil - } - } + tc.writeSettings(Setting{SettingHeaderTableSize, resSize}) + tc.cc.mu.Lock() + defer tc.cc.mu.Unlock() + if got, want := tc.cc.peerMaxHeaderTableSize, resSize; got != want { + t.Fatalf("peerHeaderTableSize = %d, want %d", got, want) } - ct.run() } func TestTransportMaxEncoderHeaderTableSize(t *testing.T) { - ct := newClientTester(t) var peerAdvertisedMaxHeaderTableSize uint32 = 16384 - ct.tr.MaxEncoderHeaderTableSize = 8192 - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - cc, err := ct.tr.NewClientConn(ct.cc) - if err != nil { - return err - } - _, err = cc.RoundTrip(req) - if err != nil { - return err - } - if got, want := cc.henc.MaxDynamicTableSize(), ct.tr.MaxEncoderHeaderTableSize; got != want { - return fmt.Errorf("henc.MaxDynamicTableSize() = %d, want %d", got, want) - } - return nil - } - ct.server = func() error { - buf := make([]byte, len(ClientPreface)) - _, err := io.ReadFull(ct.sc, buf) - if err != nil { - return fmt.Errorf("reading client preface: %v", err) - } - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - sf, ok := f.(*SettingsFrame) - if !ok { - ct.t.Fatalf("wanted client settings frame; got %v", f) - _ = sf // stash it away? 
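TestTransportMaxDecoderHeaderTableSize above (and its encoder-side counterpart just below) exercises the two HPACK table knobs: MaxDecoderHeaderTableSize is advertised to the server as SETTINGS_HEADER_TABLE_SIZE and bounds the table used to decode response headers, while MaxEncoderHeaderTableSize caps the table used to encode request headers and never exceeds what the peer advertises. A configuration sketch using the upstream golang.org/x/net/http2 field names:

package main

import "golang.org/x/net/http2"

func newTransportWithHPACKLimits() *http2.Transport {
	return &http2.Transport{
		// Advertised in the client's SETTINGS frame as SETTINGS_HEADER_TABLE_SIZE.
		MaxDecoderHeaderTableSize: 8192,
		// Upper bound for the request-header encoder; the effective size is
		// also limited by the server's advertised SETTINGS_HEADER_TABLE_SIZE.
		MaxEncoderHeaderTableSize: 8192,
	}
}

func main() { _ = newTransportWithHPACKLimits() }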
- } - if err := ct.fr.WriteSettings(Setting{SettingHeaderTableSize, peerAdvertisedMaxHeaderTableSize}); err != nil { - ct.t.Fatal(err) - } - if err := ct.fr.WriteSettingsAck(); err != nil { - ct.t.Fatal(err) - } + tc := newTestClientConn(t, func(tr *Transport) { + tr.MaxEncoderHeaderTableSize = 8192 + }) + tc.greet(Setting{SettingHeaderTableSize, peerAdvertisedMaxHeaderTableSize}) - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - return nil - } - } + if got, want := tc.cc.henc.MaxDynamicTableSize(), tc.tr.MaxEncoderHeaderTableSize; got != want { + t.Fatalf("henc.MaxDynamicTableSize() = %d, want %d", got, want) } - ct.run() } func TestAuthorityAddr(t *testing.T) { @@ -4480,7 +3581,7 @@ func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { writeErr := make(chan error, 1) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.(http.Flusher).Flush() var sum int64 for i := 0; i < 100; i++ { @@ -4493,13 +3594,12 @@ func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { } t.Logf("wrote all %d bytes", sum) writeErr <- nil - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} - res, err := c.Get(st.ts.URL) + res, err := c.Get(ts.URL) if err != nil { t.Fatal(err) } @@ -4530,61 +3630,43 @@ func TestTransportAllocationsAfterResponseBodyClose(t *testing.T) { // Issue 18891: make sure Request.Body == NoBody means no DATA frame // is ever sent, even if empty. 
func TestTransportNoBodyMeansNoDATA(t *testing.T) { - ct := newClientTester(t) - - unblockClient := make(chan bool) - - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", http.NoBody) - ct.tr.RoundTrip(req) - <-unblockClient - return nil - } - ct.server = func() error { - defer close(unblockClient) - defer ct.cc.(*net.TCPConn).Close() - ct.greet() - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return fmt.Errorf("ReadFrame while waiting for Headers: %v", err) - } - switch f := f.(type) { - default: - return fmt.Errorf("Got %T; want HeadersFrame", f) - case *WindowUpdateFrame, *SettingsFrame: - continue - case *HeadersFrame: - if !f.StreamEnded() { - return fmt.Errorf("got headers frame without END_STREAM") - } - return nil - } - } + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", http.NoBody) + rt := tc.roundTrip(req) + + tc.wantHeaders(wantHeader{ + streamID: rt.streamID(), + endStream: true, // END_STREAM should be set when body is http.NoBody + header: http.Header{ + ":authority": []string{"dummy.tld"}, + ":method": []string{"GET"}, + ":path": []string{"/"}, + }, + }) + if fr := tc.readFrame(); fr != nil { + t.Fatalf("unexpected frame after headers: %v", fr) } - ct.run() } func benchSimpleRoundTrip(b *testing.B, nReqHeaders, nResHeader int) { - defer disableGoroutineTracking()() + disableGoroutineTracking(b) b.ReportAllocs() - st := newServerTester(b, + ts := newTestServer(b, func(w http.ResponseWriter, r *http.Request) { for i := 0; i < nResHeader; i++ { name := fmt.Sprint("A-", i) w.Header().Set(name, "*") } }, - optOnlyServer, optQuiet, ) - defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { b.Fatal(err) } @@ -4620,16 +3702,15 @@ func (r infiniteReader) Read(b []byte) (int, error) { // Issue 20521: it is not an error to receive a response and end stream // from the server without the body being consumed. func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() // The request body needs to be big enough to trigger flow control. - req, _ := http.NewRequest("PUT", st.ts.URL, infiniteReader{}) + req, _ := http.NewRequest("PUT", ts.URL, infiniteReader{}) res, err := tr.RoundTrip(req) if err != nil { t.Fatal(err) @@ -4642,41 +3723,22 @@ func TestTransportResponseAndResetWithoutConsumingBodyRace(t *testing.T) { // Verify transport doesn't crash when receiving bogus response lacking a :status header. // Issue 22880. 
func TestTransportHandlesInvalidStatuslessResponse(t *testing.T) { - ct := newClientTester(t) - ct.client = func() error { - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - _, err := ct.tr.RoundTrip(req) - const substr = "malformed response from server: missing status pseudo header" - if !strings.Contains(fmt.Sprint(err), substr) { - return fmt.Errorf("RoundTrip error = %v; want substring %q", err, substr) - } - return nil - } - ct.server = func() error { - ct.greet() - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } - switch f := f.(type) { - case *HeadersFrame: - enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"}) // no :status header - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, // we'll send some DATA to try to crash the transport - BlockFragment: buf.Bytes(), - }) - ct.fr.WriteData(f.StreamID, true, []byte("payload")) - return nil - } - } - } - ct.run() + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, // we'll send some DATA to try to crash the transport + BlockFragment: tc.makeHeaderBlockFragment( + "content-type", "text/html", // no :status header + ), + }) + tc.writeData(rt.streamID(), true, []byte("payload")) } func BenchmarkClientRequestHeaders(b *testing.B) { @@ -4701,10 +3763,10 @@ func BenchmarkDownloadFrameSize(b *testing.B) { b.Run("512k Frame", func(b *testing.B) { benchLargeDownloadRoundTrip(b, 512*1024) }) } func benchLargeDownloadRoundTrip(b *testing.B, frameSize uint32) { - defer disableGoroutineTracking()() + disableGoroutineTracking(b) const transferSize = 1024 * 1024 * 1024 // must be multiple of 1M b.ReportAllocs() - st := newServerTester(b, + ts := newTestServer(b, func(w http.ResponseWriter, r *http.Request) { // test 1GB transfer w.Header().Set("Content-Length", strconv.Itoa(transferSize)) @@ -4715,12 +3777,11 @@ func benchLargeDownloadRoundTrip(b *testing.B, frameSize uint32) { } }, optQuiet, ) - defer st.Close() tr := &Transport{TLSClientConfig: tlsConfigInsecure, MaxReadFrameSize: frameSize} defer tr.CloseIdleConnections() - req, err := http.NewRequest("GET", st.ts.URL, nil) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { b.Fatal(err) } @@ -4779,7 +3840,7 @@ func testClientConnClose(t *testing.T, closeMode closeMode) { closeDone := make(chan struct{}) beforeHeader := func() {} bodyWrite := func(w http.ResponseWriter) {} - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { defer close(handlerDone) beforeHeader() w.WriteHeader(http.StatusOK) @@ -4796,13 +3857,12 @@ func testClientConnClose(t *testing.T, closeMode closeMode) { t.Error("expected connection closed by client") } } - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() ctx := context.Background() - cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false) - req, err := http.NewRequest("GET", st.ts.URL, nil) + cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false) + req, err := http.NewRequest("GET", ts.URL, nil) if err != nil { t.Fatal(err) } @@ -4902,7 +3962,7 @@ func testClientConnClose(t 
*testing.T, closeMode closeMode) { case closeAtHeaders, closeAtBody: if closeMode == closeAtBody { go close(sendBody) - if _, err := io.Copy(ioutil.Discard, res.Body); err == nil { + if _, err := io.Copy(io.Discard, res.Body); err == nil { t.Error("expected a Copy error, got nil") } } @@ -4953,7 +4013,7 @@ func TestClientConnShutdownCancel(t *testing.T) { func TestTransportUsesGetBodyWhenPresent(t *testing.T) { calls := 0 someBody := func() io.ReadCloser { - return struct{ io.ReadCloser }{ioutil.NopCloser(bytes.NewReader(nil))} + return struct{ io.ReadCloser }{io.NopCloser(bytes.NewReader(nil))} } req := &http.Request{ Body: someBody(), @@ -5024,95 +4084,42 @@ func (r *errReader) Read(p []byte) (int, error) { } func testTransportBodyReadError(t *testing.T, body []byte) { - if runtime.GOOS == "windows" || runtime.GOOS == "plan9" { - // So far we've only seen this be flaky on Windows and Plan 9, - // perhaps due to TCP behavior on shutdowns while - // unread data is in flight. This test should be - // fixed, but a skip is better than annoying people - // for now. - t.Skipf("skipping flaky test on %s; https://golang.org/issue/31260", runtime.GOOS) - } - clientDone := make(chan struct{}) - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - defer ct.cc.(*net.TCPConn).Close() + tc := newTestClientConn(t) + tc.greet() + + bodyReadError := errors.New("body read error") + b := tc.newRequestBody() + b.Write(body) + b.closeWithError(bodyReadError) + req, _ := http.NewRequest("PUT", "https://dummy.tld/", b) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + var receivedBody []byte +readFrames: + for { + switch f := tc.readFrame().(type) { + case *DataFrame: + receivedBody = append(receivedBody, f.Data()...) + case *RSTStreamFrame: + break readFrames + default: + t.Fatalf("unexpected frame: %v", f) + case nil: + t.Fatalf("transport is idle, want RST_STREAM") } - defer close(clientDone) + } + if !bytes.Equal(receivedBody, body) { + t.Fatalf("body: %q; expected %q", receivedBody, body) + } - checkNoStreams := func() error { - cp, ok := ct.tr.connPool().(*clientConnPool) - if !ok { - return fmt.Errorf("conn pool is %T; want *clientConnPool", ct.tr.connPool()) - } - cp.mu.Lock() - defer cp.mu.Unlock() - conns, ok := cp.conns["dummy.tld:443"] - if !ok { - return fmt.Errorf("missing connection") - } - if len(conns) != 1 { - return fmt.Errorf("conn pool size: %v; expect 1", len(conns)) - } - if activeStreams(conns[0]) != 0 { - return fmt.Errorf("active streams count: %v; want 0", activeStreams(conns[0])) - } - return nil - } - bodyReadError := errors.New("body read error") - body := &errReader{body, bodyReadError} - req, err := http.NewRequest("PUT", "https://dummy.tld/", body) - if err != nil { - return err - } - _, err = ct.tr.RoundTrip(req) - if err != bodyReadError { - return fmt.Errorf("err = %v; want %v", err, bodyReadError) - } - if err = checkNoStreams(); err != nil { - return err - } - return nil + if err := rt.err(); err != bodyReadError { + t.Fatalf("err = %v; want %v", err, bodyReadError) } - ct.server = func() error { - ct.greet() - var receivedBody []byte - var resetCount int - for { - f, err := ct.fr.ReadFrame() - t.Logf("server: ReadFrame = %v, %v", f, err) - if err != nil { - select { - case <-clientDone: - // If the client's done, it - // will have reported any - // errors on its side. 
- if bytes.Compare(receivedBody, body) != 0 { - return fmt.Errorf("body: %q; expected %q", receivedBody, body) - } - if resetCount != 1 { - return fmt.Errorf("stream reset count: %v; expected: 1", resetCount) - } - return nil - default: - return err - } - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - case *DataFrame: - receivedBody = append(receivedBody, f.Data()...) - case *RSTStreamFrame: - resetCount++ - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } + + if got := activeStreams(tc.cc); got != 0 { + t.Fatalf("active streams count: %v; want 0", got) } - ct.run() } func TestTransportBodyReadError_Immediately(t *testing.T) { testTransportBodyReadError(t, nil) } @@ -5125,59 +4132,18 @@ func TestTransportBodyEagerEndStream(t *testing.T) { const reqBody = "some request body" const resBody = "some response body" - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - if runtime.GOOS == "plan9" { - // CloseWrite not supported on Plan 9; Issue 17906 - defer ct.cc.(*net.TCPConn).Close() - } - body := strings.NewReader(reqBody) - req, err := http.NewRequest("PUT", "https://dummy.tld/", body) - if err != nil { - return err - } - _, err = ct.tr.RoundTrip(req) - if err != nil { - return err - } - return nil - } - ct.server = func() error { - ct.greet() + tc := newTestClientConn(t) + tc.greet() - for { - f, err := ct.fr.ReadFrame() - if err != nil { - return err - } + body := strings.NewReader(reqBody) + req, _ := http.NewRequest("PUT", "https://dummy.tld/", body) + tc.roundTrip(req) - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - case *DataFrame: - if !f.StreamEnded() { - ct.fr.WriteRSTStream(f.StreamID, ErrCodeRefusedStream) - return fmt.Errorf("data frame without END_STREAM %v", f) - } - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.Header().StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: buf.Bytes(), - }) - ct.fr.WriteData(f.StreamID, true, []byte(resBody)) - return nil - case *RSTStreamFrame: - default: - return fmt.Errorf("Unexpected client frame %v", f) - } - } + tc.wantFrameType(FrameHeaders) + f := readFrame[*DataFrame](t, tc) + if !f.StreamEnded() { + t.Fatalf("data frame without END_STREAM %v", f) } - ct.run() } type chunkReader struct { @@ -5217,15 +4183,14 @@ func TestTransportBodyLargerThanSpecifiedContentLength_len2(t *testing.T) { } func testTransportBodyLargerThanSpecifiedContentLength(t *testing.T, body *chunkReader, contentLen int64) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { r.Body.Read(make([]byte, 6)) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() - req, _ := http.NewRequest("POST", st.ts.URL, body) + req, _ := http.NewRequest("POST", ts.URL, body) req.ContentLength = contentLen _, err := tr.RoundTrip(req) if err != errReqBodyTooLong { @@ -5305,13 +4270,12 @@ func TestTransportRoundtripCloseOnWriteError(t *testing.T) { if err != nil { t.Fatal(err) } - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {}, optOnlyServer) - defer st.Close() + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer 
tr.CloseIdleConnections() ctx := context.Background() - cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false) + cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false) if err != nil { t.Fatal(err) } @@ -5338,12 +4302,11 @@ func TestTransportRoundtripCloseOnWriteError(t *testing.T) { // already. If the request body has started to be sent, one must wait until it // is completed. func TestTransportBodyRewindRace(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Connection", "close") w.WriteHeader(http.StatusOK) return - }, optOnlyServer) - defer st.Close() + }) tr := &http.Transport{ TLSClientConfig: tlsConfigInsecure, @@ -5362,7 +4325,7 @@ func TestTransportBodyRewindRace(t *testing.T) { var wg sync.WaitGroup wg.Add(clients) for i := 0; i < clients; i++ { - req, err := http.NewRequest("POST", st.ts.URL, bytes.NewBufferString("abcdef")) + req, err := http.NewRequest("POST", ts.URL, bytes.NewBufferString("abcdef")) if err != nil { t.Fatalf("unexpect new request error: %v", err) } @@ -5382,11 +4345,10 @@ func TestTransportBodyRewindRace(t *testing.T) { // Issue 42498: A request with a body will never be sent if the stream is // reset prior to sending any data. func TestTransportServerResetStreamAtHeaders(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) return - }, optOnlyServer) - defer st.Close() + }) tr := &http.Transport{ TLSClientConfig: tlsConfigInsecure, @@ -5402,7 +4364,7 @@ func TestTransportServerResetStreamAtHeaders(t *testing.T) { Transport: tr, } - req, err := http.NewRequest("POST", st.ts.URL, errorReader{io.EOF}) + req, err := http.NewRequest("POST", ts.URL, errorReader{io.EOF}) if err != nil { t.Fatalf("unexpect new request error: %v", err) } @@ -5430,15 +4392,14 @@ func (tr *trackingReader) WasRead() bool { } func TestTransportExpectContinue(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/reject": w.WriteHeader(403) default: io.Copy(io.Discard, r.Body) } - }, optOnlyServer) - defer st.Close() + }) tr := &http.Transport{ TLSClientConfig: tlsConfigInsecure, @@ -5481,7 +4442,7 @@ func TestTransportExpectContinue(t *testing.T) { t.Run(tc.Name, func(t *testing.T) { startTime := time.Now() - req, err := http.NewRequest("POST", st.ts.URL+tc.Path, tc.Body) + req, err := http.NewRequest("POST", ts.URL+tc.Path, tc.Body) if err != nil { t.Fatal(err) } @@ -5593,11 +4554,11 @@ func (c *blockingWriteConn) Write(b []byte) (n int, err error) { func TestTransportFrameBufferReuse(t *testing.T) { filler := hex.EncodeToString([]byte(randString(2048))) - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { if got, want := r.Header.Get("Big"), filler; got != want { t.Errorf(`r.Header.Get("Big") = %q, want %q`, got, want) } - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { t.Errorf("error reading request body: %v", err) } @@ -5607,8 +4568,7 @@ func TestTransportFrameBufferReuse(t *testing.T) { if got, want := r.Trailer.Get("Big"), filler; got != want { t.Errorf(`r.Trailer.Get("Big") = %q, want %q`, got, want) } - }, optOnlyServer) - 
defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -5619,7 +4579,7 @@ func TestTransportFrameBufferReuse(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - req, err := http.NewRequest("POST", st.ts.URL, strings.NewReader(filler)) + req, err := http.NewRequest("POST", ts.URL, strings.NewReader(filler)) if err != nil { t.Error(err) return @@ -5685,7 +4645,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) { }} { test := test t.Run(test.name, func(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { if v := r.Header.Get("Big"); v != "" && v != filler { t.Errorf("request header mismatch") } @@ -5695,10 +4655,9 @@ func TestTransportBlockingRequestWrite(t *testing.T) { if v := r.Trailer.Get("Big"); v != "" && v != filler { t.Errorf("request trailer mismatch\ngot: %q\nwant: %q", string(v), filler) } - }, optOnlyServer, func(s *Server) { + }, func(s *Server) { s.MaxConcurrentStreams = 1 }) - defer st.Close() // This Transport creates connections that block on writes after 1024 bytes. connc := make(chan *blockingWriteConn, 1) @@ -5720,7 +4679,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) { // Request 1: A small request to ensure we read the server MaxConcurrentStreams. { - req, err := http.NewRequest("POST", st.ts.URL, nil) + req, err := http.NewRequest("POST", ts.URL, nil) if err != nil { t.Fatal(err) } @@ -5740,7 +4699,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) { reqc := make(chan struct{}) go func() { defer close(reqc) - req, err := test.req(st.ts.URL) + req, err := test.req(ts.URL) if err != nil { t.Error(err) return @@ -5756,7 +4715,7 @@ func TestTransportBlockingRequestWrite(t *testing.T) { // Request 3: A small request that is sent on a new connection, since request 2 // is hogging the only available stream on the previous connection. { - req, err := http.NewRequest("POST", st.ts.URL, nil) + req, err := http.NewRequest("POST", ts.URL, nil) if err != nil { t.Fatal(err) } @@ -5791,15 +4750,14 @@ func TestTransportBlockingRequestWrite(t *testing.T) { func TestTransportCloseRequestBody(t *testing.T) { var statusCode int - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(statusCode) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() ctx := context.Background() - cc, err := tr.dialClientConn(ctx, st.ts.Listener.Addr().String(), false) + cc, err := tr.dialClientConn(ctx, ts.Listener.Addr().String(), false) if err != nil { t.Fatal(err) } @@ -5826,185 +4784,113 @@ func TestTransportCloseRequestBody(t *testing.T) { } } -// collectClientsConnPool is a ClientConnPool that wraps lower and -// collects what calls were made on it. 
-type collectClientsConnPool struct { - lower ClientConnPool - - mu sync.Mutex - getErrs int - got []*ClientConn -} - -func (p *collectClientsConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - cc, err := p.lower.GetClientConn(req, addr) - p.mu.Lock() - defer p.mu.Unlock() - if err != nil { - p.getErrs++ - return nil, err - } - p.got = append(p.got, cc) - return cc, nil -} - -func (p *collectClientsConnPool) MarkDead(cc *ClientConn) { - p.lower.MarkDead(cc) -} - func TestTransportRetriesOnStreamProtocolError(t *testing.T) { - ct := newClientTester(t) - pool := &collectClientsConnPool{ - lower: &clientConnPool{t: ct.tr}, - } - ct.tr.ConnPool = pool + // This test verifies that + // - receiving a protocol error on a connection does not interfere with + // other requests in flight on that connection; + // - the connection is not reused for further requests; and + // - the failed request is retried on a new connecection. + tt := newTestTransport(t) + + // Start two requests. The first is a long request + // that will finish after the second. The second one + // will result in the protocol error. + + // Request #1: The long request. + req1, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt1 := tt.roundTrip(req1) + tc1 := tt.getConn() + tc1.wantFrameType(FrameSettings) + tc1.wantFrameType(FrameWindowUpdate) + tc1.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc1.writeSettings() + tc1.wantFrameType(FrameSettings) // settings ACK + + // Request #2(a): The short request. + req2, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt2 := tt.roundTrip(req2) + tc1.wantHeaders(wantHeader{ + streamID: 3, + endStream: true, + }) - gotProtoError := make(chan bool, 1) - ct.tr.CountError = func(errType string) { - if errType == "recv_rststream_PROTOCOL_ERROR" { - select { - case gotProtoError <- true: - default: - } - } + // Request #2(a) fails with ErrCodeProtocol. + tc1.writeRSTStream(3, ErrCodeProtocol) + if rt1.done() { + t.Fatalf("After protocol error on RoundTrip #2, RoundTrip #1 is done; want still in progress") } - ct.client = func() error { - // Start two requests. The first is a long request - // that will finish after the second. The second one - // will result in the protocol error. We check that - // after the first one closes, the connection then - // shuts down. - - // The long, outer request. 
- req1, _ := http.NewRequest("GET", "https://dummy.tld/long", nil) - res1, err := ct.tr.RoundTrip(req1) - if err != nil { - return err - } - if got, want := res1.Header.Get("Is-Long"), "1"; got != want { - return fmt.Errorf("First response's Is-Long header = %q; want %q", got, want) - } - - req, _ := http.NewRequest("POST", "https://dummy.tld/fails", nil) - res, err := ct.tr.RoundTrip(req) - const want = "only one dial allowed in test mode" - if got := fmt.Sprint(err); got != want { - t.Errorf("didn't dial again: got %#q; want %#q", got, want) - } - if res != nil { - res.Body.Close() - } - select { - case <-gotProtoError: - default: - t.Errorf("didn't get stream protocol error") - } - - if n, err := res1.Body.Read(make([]byte, 10)); err != io.EOF || n != 0 { - t.Errorf("unexpected body read %v, %v", n, err) - } - - pool.mu.Lock() - defer pool.mu.Unlock() - if pool.getErrs != 1 { - t.Errorf("pool get errors = %v; want 1", pool.getErrs) - } - if len(pool.got) == 2 { - if pool.got[0] != pool.got[1] { - t.Errorf("requests went on different connections") - } - cc := pool.got[0] - cc.mu.Lock() - if !cc.doNotReuse { - t.Error("ClientConn not marked doNotReuse") - } - cc.mu.Unlock() - - select { - case <-cc.readerDone: - case <-time.After(5 * time.Second): - t.Errorf("timeout waiting for reader to be done") - } - } else { - t.Errorf("pool get success = %v; want 2", len(pool.got)) - } - return nil + if rt2.done() { + t.Fatalf("After protocol error on RoundTrip #2, RoundTrip #2 is done; want still in progress") } - ct.server = func() error { - ct.greet() - var sentErr bool - var numHeaders int - var firstStreamID uint32 - - var hbuf bytes.Buffer - enc := hpack.NewEncoder(&hbuf) - for { - f, err := ct.fr.ReadFrame() - if err == io.EOF { - // Client hung up on us, as it should at the end. - return nil - } - if err != nil { - return nil - } - switch f := f.(type) { - case *WindowUpdateFrame, *SettingsFrame: - case *HeadersFrame: - numHeaders++ - if numHeaders == 1 { - firstStreamID = f.StreamID - hbuf.Reset() - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - enc.WriteField(hpack.HeaderField{Name: "is-long", Value: "1"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: f.StreamID, - EndHeaders: true, - EndStream: false, - BlockFragment: hbuf.Bytes(), - }) - continue - } - if !sentErr { - sentErr = true - ct.fr.WriteRSTStream(f.StreamID, ErrCodeProtocol) - ct.fr.WriteData(firstStreamID, true, nil) - continue - } - } - } - } - ct.run() + // Request #2(b): The short request is retried on a new connection. + tc2 := tt.getConn() + tc2.wantFrameType(FrameSettings) + tc2.wantFrameType(FrameWindowUpdate) + tc2.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + }) + tc2.writeSettings() + tc2.wantFrameType(FrameSettings) // settings ACK + + // Request #2(b) succeeds. + tc2.writeHeaders(HeadersFrameParam{ + StreamID: 1, + EndHeaders: true, + EndStream: true, + BlockFragment: tc1.makeHeaderBlockFragment( + ":status", "201", + ), + }) + rt2.wantStatus(201) + + // Request #1 succeeds. 
+ tc1.writeHeaders(HeadersFrameParam{ + StreamID: 1, + EndHeaders: true, + EndStream: true, + BlockFragment: tc1.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt1.wantStatus(200) } func TestClientConnReservations(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - }, func(s *Server) { - s.MaxConcurrentStreams = initialMaxConcurrentStreams - }) - defer st.Close() - - tr := &Transport{TLSClientConfig: tlsConfigInsecure} - defer tr.CloseIdleConnections() + tc := newTestClientConn(t) + tc.greet( + Setting{ID: SettingMaxConcurrentStreams, Val: initialMaxConcurrentStreams}, + ) - cc, err := tr.newClientConn(st.cc, false) - if err != nil { - t.Fatal(err) + doRoundTrip := func() { + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt.wantStatus(200) } - req, _ := http.NewRequest("GET", st.ts.URL, nil) n := 0 - for n <= initialMaxConcurrentStreams && cc.ReserveNewRequest() { + for n <= initialMaxConcurrentStreams && tc.cc.ReserveNewRequest() { n++ } if n != initialMaxConcurrentStreams { t.Errorf("did %v reservations; want %v", n, initialMaxConcurrentStreams) } - if _, err := cc.RoundTrip(req); err != nil { - t.Fatalf("RoundTrip error = %v", err) - } + doRoundTrip() n2 := 0 - for n2 <= 5 && cc.ReserveNewRequest() { + for n2 <= 5 && tc.cc.ReserveNewRequest() { n2++ } if n2 != 1 { @@ -6013,11 +4899,11 @@ func TestClientConnReservations(t *testing.T) { // Use up all the reservations for i := 0; i < n; i++ { - cc.RoundTrip(req) + doRoundTrip() } n2 = 0 - for n2 <= initialMaxConcurrentStreams && cc.ReserveNewRequest() { + for n2 <= initialMaxConcurrentStreams && tc.cc.ReserveNewRequest() { n2++ } if n2 != n { @@ -6026,47 +4912,34 @@ func TestClientConnReservations(t *testing.T) { } func TestTransportTimeoutServerHangs(t *testing.T) { - clientDone := make(chan struct{}) - ct := newClientTester(t) - ct.client = func() error { - defer ct.cc.(*net.TCPConn).CloseWrite() - defer close(clientDone) + tc := newTestClientConn(t) + tc.greet() - req, err := http.NewRequest("PUT", "https://dummy.tld/", nil) - if err != nil { - return err - } + ctx, cancel := context.WithCancel(context.Background()) + req, _ := http.NewRequestWithContext(ctx, "PUT", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - req = req.WithContext(ctx) - req.Header.Add("Big", strings.Repeat("a", 1<<20)) - _, err = ct.tr.RoundTrip(req) - if err == nil { - return errors.New("error should not be nil") - } - if ne, ok := err.(net.Error); !ok || !ne.Timeout() { - return fmt.Errorf("error should be a net error timeout: %v", err) - } - return nil + tc.wantFrameType(FrameHeaders) + tc.advance(5 * time.Second) + if f := tc.readFrame(); f != nil { + t.Fatalf("unexpected frame: %v", f) } - ct.server = func() error { - ct.greet() - select { - case <-time.After(5 * time.Second): - case <-clientDone: - } - return nil + if rt.done() { + t.Fatalf("after 5 seconds with no response, RoundTrip unexpectedly returned") + } + + cancel() + tc.sync() + if rt.err() != context.Canceled { + t.Fatalf("RoundTrip error: %v; want context.Canceled", rt.err()) } - ct.run() } func TestTransportContentLengthWithoutBody(t *testing.T) { contentLength := "" - st := newServerTester(t, func(w 
http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Length", contentLength) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() @@ -6093,7 +4966,7 @@ func TestTransportContentLengthWithoutBody(t *testing.T) { t.Run(test.name, func(t *testing.T) { contentLength = test.contentLength - req, _ := http.NewRequest("GET", st.ts.URL, nil) + req, _ := http.NewRequest("GET", ts.URL, nil) res, err := tr.RoundTrip(req) if err != nil { t.Fatal(err) @@ -6115,18 +4988,17 @@ func TestTransportContentLengthWithoutBody(t *testing.T) { } func TestTransportCloseResponseBodyWhileRequestBodyHangs(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(200) w.(http.Flusher).Flush() io.Copy(io.Discard, r.Body) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() pr, pw := net.Pipe() - req, err := http.NewRequest("GET", st.ts.URL, pr) + req, err := http.NewRequest("GET", ts.URL, pr) if err != nil { t.Fatal(err) } @@ -6142,19 +5014,18 @@ func TestTransportCloseResponseBodyWhileRequestBodyHangs(t *testing.T) { func TestTransport300ResponseBody(t *testing.T) { reqc := make(chan struct{}) body := []byte("response body") - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(300) w.(http.Flusher).Flush() <-reqc w.Write(body) - }, optOnlyServer) - defer st.Close() + }) tr := &Transport{TLSClientConfig: tlsConfigInsecure} defer tr.CloseIdleConnections() pr, pw := net.Pipe() - req, err := http.NewRequest("GET", st.ts.URL, pr) + req, err := http.NewRequest("GET", ts.URL, pr) if err != nil { t.Fatal(err) } @@ -6175,11 +5046,9 @@ func TestTransport300ResponseBody(t *testing.T) { } func TestTransportWriteByteTimeout(t *testing.T) { - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}, - optOnlyServer, ) - defer st.Close() tr := &Transport{ TLSClientConfig: tlsConfigInsecure, DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { @@ -6191,7 +5060,7 @@ func TestTransportWriteByteTimeout(t *testing.T) { defer tr.CloseIdleConnections() c := &http.Client{Transport: tr} - _, err := c.Get(st.ts.URL) + _, err := c.Get(ts.URL) if !errors.Is(err, os.ErrDeadlineExceeded) { t.Fatalf("Get on unresponsive connection: got %q; want ErrDeadlineExceeded", err) } @@ -6219,11 +5088,9 @@ func (c *slowWriteConn) Write(b []byte) (n int, err error) { } func TestTransportSlowWrites(t *testing.T) { - st := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}, - optOnlyServer, ) - defer st.Close() tr := &Transport{ TLSClientConfig: tlsConfigInsecure, DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) { @@ -6237,7 +5104,7 @@ func TestTransportSlowWrites(t *testing.T) { c := &http.Client{Transport: tr} const bodySize = 1 << 20 - resp, err := c.Post(st.ts.URL, "text/foo", io.LimitReader(neverEnding('A'), bodySize)) + resp, err := c.Post(ts.URL, "text/foo", io.LimitReader(neverEnding('A'), bodySize)) if err != nil { t.Fatal(err) } @@ -6251,20 +5118,6 @@ func TestTransportClosesConnAfterGoAwayLastStream(t *testing.T) { testTransportClosesConnAfterGoAway(t, 1) } -type closeOnceConn 
struct { - net.Conn - closed uint32 -} - -var errClosed = errors.New("Close of closed connection") - -func (c *closeOnceConn) Close() error { - if atomic.CompareAndSwapUint32(&c.closed, 0, 1) { - return c.Conn.Close() - } - return errClosed -} - // testTransportClosesConnAfterGoAway verifies that the transport // closes a connection after reading a GOAWAY from it. // @@ -6272,53 +5125,35 @@ func (c *closeOnceConn) Close() error { // When 0, the transport (unsuccessfully) retries the request (stream 1); // when 1, the transport reads the response after receiving the GOAWAY. func testTransportClosesConnAfterGoAway(t *testing.T, lastStream uint32) { - ct := newClientTester(t) - ct.cc = &closeOnceConn{Conn: ct.cc} - - var wg sync.WaitGroup - wg.Add(1) - ct.client = func() error { - defer wg.Done() - req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) - res, err := ct.tr.RoundTrip(req) - if err == nil { - res.Body.Close() - } - if gotErr, wantErr := err != nil, lastStream == 0; gotErr != wantErr { - t.Errorf("RoundTrip got error %v (want error: %v)", err, wantErr) - } - if err = ct.cc.Close(); err != errClosed { - return fmt.Errorf("ct.cc.Close() = %v, want errClosed", err) - } - return nil + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeGoAway(lastStream, ErrCodeNo, nil) + + if lastStream > 0 { + // Send a valid response to first request. + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) } - ct.server = func() error { - defer wg.Wait() - ct.greet() - hf, err := ct.firstHeaders() - if err != nil { - return fmt.Errorf("server failed reading HEADERS: %v", err) - } - if err := ct.fr.WriteGoAway(lastStream, ErrCodeNo, nil); err != nil { - return fmt.Errorf("server failed writing GOAWAY: %v", err) - } - if lastStream > 0 { - // Send a valid response to first request. - var buf bytes.Buffer - enc := hpack.NewEncoder(&buf) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - ct.fr.WriteHeaders(HeadersFrameParam{ - StreamID: hf.StreamID, - EndHeaders: true, - EndStream: true, - BlockFragment: buf.Bytes(), - }) - } - return nil + tc.closeWrite() + err := rt.err() + if gotErr, wantErr := err != nil, lastStream == 0; gotErr != wantErr { + t.Errorf("RoundTrip got error %v (want error: %v)", err, wantErr) + } + if !tc.isClosed() { + t.Errorf("ClientConn did not close its net.Conn, expected it to") } - - ct.run() } type slowCloser struct { @@ -6337,11 +5172,10 @@ func (r *slowCloser) Close() error { } func TestTransportSlowClose(t *testing.T) { - st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) { - }, optOnlyServer) - defer st.Close() + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + }) - client := st.ts.Client() + client := ts.Client() body := &slowCloser{ closing: make(chan struct{}), closed: make(chan struct{}), @@ -6350,7 +5184,7 @@ func TestTransportSlowClose(t *testing.T) { reqc := make(chan struct{}) go func() { defer close(reqc) - res, err := client.Post(st.ts.URL, "text/plain", body) + res, err := client.Post(ts.URL, "text/plain", body) if err != nil { t.Error(err) } @@ -6363,7 +5197,7 @@ func TestTransportSlowClose(t *testing.T) { <-body.closing // wait for POST request to call body.Close // This GET request should not be blocked by the in-progress POST. 
- res, err := client.Get(st.ts.URL) + res, err := client.Get(ts.URL) if err != nil { t.Fatal(err) } @@ -6379,12 +5213,10 @@ func TestTransportDialTLSContext(t *testing.T) { ClientAuth: tls.RequestClientCert, } } - ts := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}, - optOnlyServer, serverTLSConfigFunc, ) - defer ts.Close() tr := &Transport{ TLSClientConfig: &tls.Config{ GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { @@ -6398,7 +5230,7 @@ func TestTransportDialTLSContext(t *testing.T) { }, } defer tr.CloseIdleConnections() - req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil) + req, err := http.NewRequest(http.MethodGet, ts.URL, nil) if err != nil { t.Fatal(err) } @@ -6443,12 +5275,10 @@ func TestDialRaceResumesDial(t *testing.T) { ClientAuth: tls.RequestClientCert, } } - ts := newServerTester(t, + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}, - optOnlyServer, serverTLSConfigFunc, ) - defer ts.Close() tr := &Transport{ TLSClientConfig: &tls.Config{ GetClientCertificate: func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { @@ -6466,7 +5296,7 @@ func TestDialRaceResumesDial(t *testing.T) { }, } defer tr.CloseIdleConnections() - req, err := http.NewRequest(http.MethodGet, ts.ts.URL, nil) + req, err := http.NewRequest(http.MethodGet, ts.URL, nil) if err != nil { t.Fatal(err) } @@ -6520,3 +5350,537 @@ func TestDialRaceResumesDial(t *testing.T) { case <-successCh: } } + +func TestTransportDataAfter1xxHeader(t *testing.T) { + // Discard logger output to avoid spamming stderr. + log.SetOutput(io.Discard) + defer log.SetOutput(os.Stderr) + + // https://go.dev/issue/65927 - server sends a 1xx response, followed by a DATA frame. + tc := newTestClientConn(t) + tc.greet() + + req, _ := http.NewRequest("GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "100", + ), + }) + tc.writeData(rt.streamID(), true, []byte{0}) + err := rt.err() + if err, ok := err.(StreamError); !ok || err.Code != ErrCodeProtocol { + t.Errorf("RoundTrip error: %v; want ErrCodeProtocol", err) + } + tc.wantFrameType(FrameRSTStream) +} + +func TestIssue66763Race(t *testing.T) { + tr := &Transport{ + IdleConnTimeout: 1 * time.Nanosecond, + AllowHTTP: true, // issue 66763 only occurs when AllowHTTP is true + } + defer tr.CloseIdleConnections() + + cli, srv := net.Pipe() + donec := make(chan struct{}) + go func() { + // Creating the client conn may succeed or fail, + // depending on when the idle timeout happens. + // Either way, the idle timeout will close the net.Conn. + tr.NewClientConn(cli) + close(donec) + }() + + // The client sends its preface and SETTINGS frame, + // and then closes its conn after the idle timeout. + io.ReadAll(srv) + srv.Close() + + <-donec +} + +// Issue 67671: Sending a Connection: close request on a Transport with AllowHTTP +// set caused a the transport to wedge. 
+func TestIssue67671(t *testing.T) { + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) {}) + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + AllowHTTP: true, + } + defer tr.CloseIdleConnections() + req, _ := http.NewRequest("GET", ts.URL, nil) + req.Close = true + for i := 0; i < 2; i++ { + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + } +} + +func TestTransport1xxLimits(t *testing.T) { + for _, test := range []struct { + name string + opt any + ctxfn func(context.Context) context.Context + hcount int + limited bool + }{{ + name: "default", + hcount: 10, + limited: false, + }, { + name: "MaxHeaderListSize", + opt: func(tr *Transport) { + tr.MaxHeaderListSize = 10000 + }, + hcount: 10, + limited: true, + }, { + name: "MaxResponseHeaderBytes", + opt: func(tr *http.Transport) { + tr.MaxResponseHeaderBytes = 10000 + }, + hcount: 10, + limited: true, + }, { + name: "limit by client trace", + ctxfn: func(ctx context.Context) context.Context { + count := 0 + return httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + Got1xxResponse: func(code int, header textproto.MIMEHeader) error { + count++ + if count >= 10 { + return errors.New("too many 1xx") + } + return nil + }, + }) + }, + hcount: 10, + limited: true, + }, { + name: "limit disabled by client trace", + opt: func(tr *Transport) { + tr.MaxHeaderListSize = 10000 + }, + ctxfn: func(ctx context.Context) context.Context { + return httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + Got1xxResponse: func(code int, header textproto.MIMEHeader) error { + return nil + }, + }) + }, + hcount: 20, + limited: false, + }} { + t.Run(test.name, func(t *testing.T) { + tc := newTestClientConn(t, test.opt) + tc.greet() + + ctx := context.Background() + if test.ctxfn != nil { + ctx = test.ctxfn(ctx) + } + req, _ := http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil) + rt := tc.roundTrip(req) + tc.wantFrameType(FrameHeaders) + + for i := 0; i < test.hcount; i++ { + if fr, err := tc.fr.ReadFrame(); err != os.ErrDeadlineExceeded { + t.Fatalf("after writing %v 1xx headers: read %v, %v; want idle", i, fr, err) + } + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + EndStream: false, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "103", + "x-field", strings.Repeat("a", 1000), + ), + }) + } + if test.limited { + tc.wantFrameType(FrameRSTStream) + } else { + tc.wantIdle() + } + }) + } +} + +func TestTransportSendPingWithReset(t *testing.T) { + tc := newTestClientConn(t, func(tr *Transport) { + tr.StrictMaxConcurrentStreams = true + }) + + const maxConcurrent = 3 + tc.greet(Setting{SettingMaxConcurrentStreams, maxConcurrent}) + + // Start several requests. + var rts []*testRoundTrip + for i := 0; i < maxConcurrent+1; i++ { + req := must(http.NewRequest("GET", "https://dummy.tld/", nil)) + rt := tc.roundTrip(req) + if i >= maxConcurrent { + tc.wantIdle() + continue + } + tc.wantFrameType(FrameHeaders) + tc.writeHeaders(HeadersFrameParam{ + StreamID: rt.streamID(), + EndHeaders: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt.wantStatus(200) + rts = append(rts, rt) + } + + // Cancel one request. We send a PING frame along with the RST_STREAM. + rts[0].response().Body.Close() + tc.wantRSTStream(rts[0].streamID(), ErrCodeCancel) + pf := readFrame[*PingFrame](t, tc) + tc.wantIdle() + + // Cancel another request. No PING frame, since one is in flight. 
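+ // (The canceled streams are only finalized once that in-flight PING is
+ // acknowledged; see the comment on ClientConn.rstStreamPingsBlocked.)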
+ rts[1].response().Body.Close() + tc.wantRSTStream(rts[1].streamID(), ErrCodeCancel) + tc.wantIdle() + + // Respond to the PING. + // This finalizes the previous resets, and allows the pending request to be sent. + tc.writePing(true, pf.Data) + tc.wantFrameType(FrameHeaders) + tc.wantIdle() + + // Receive a byte of data for the remaining stream, which resets our ability + // to send pings (see comment on ClientConn.rstStreamPingsBlocked). + tc.writeData(rts[2].streamID(), false, []byte{0}) + + // Cancel the last request. We send another PING, since none are in flight. + rts[2].response().Body.Close() + tc.wantRSTStream(rts[2].streamID(), ErrCodeCancel) + tc.wantFrameType(FramePing) + tc.wantIdle() +} + +// Issue #70505: gRPC gets upset if we send more than 2 pings per HEADERS/DATA frame +// sent by the server. +func TestTransportSendNoMoreThanOnePingWithReset(t *testing.T) { + tc := newTestClientConn(t) + tc.greet() + + makeAndResetRequest := func() { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + req := must(http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil)) + rt := tc.roundTrip(req) + tc.wantFrameType(FrameHeaders) + cancel() + tc.wantRSTStream(rt.streamID(), ErrCodeCancel) // client sends RST_STREAM + } + + // Create a request and cancel it. + // The client sends a PING frame along with the reset. + makeAndResetRequest() + pf1 := readFrame[*PingFrame](t, tc) // client sends PING + + // Create another request and cancel it. + // We do not send a PING frame along with the reset, + // because we haven't received a HEADERS or DATA frame from the server + // since the last PING we sent. + makeAndResetRequest() + + // Server belatedly responds to request 1. + // The server has not responded to our first PING yet. + tc.writeHeaders(HeadersFrameParam{ + StreamID: 1, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + + // Create yet another request and cancel it. + // We still do not send a PING frame along with the reset. + // We've received a HEADERS frame, but it came before the response to the PING. + makeAndResetRequest() + + // The server responds to our PING. + tc.writePing(true, pf1.Data) + + // Create yet another request and cancel it. + // Still no PING frame; we got a response to the previous one, + // but no HEADERS or DATA. + makeAndResetRequest() + + // Server belatedly responds to the second request. + tc.writeHeaders(HeadersFrameParam{ + StreamID: 3, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + + // One more request. + // This time we send a PING frame. + makeAndResetRequest() + tc.wantFrameType(FramePing) +} + +func TestTransportConnBecomesUnresponsive(t *testing.T) { + // We send a number of requests in series to an unresponsive connection. + // Each request is canceled or times out without a response. + // Eventually, we open a new connection rather than trying to use the old one. 
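+ // With SETTINGS_MAX_CONCURRENT_STREAMS set to 3 below, three canceled but
+ // unacknowledged streams are enough for the old connection to look full.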
+ tt := newTestTransport(t) + + const maxConcurrent = 3 + + t.Logf("first request opens a new connection and succeeds") + req1 := must(http.NewRequest("GET", "https://dummy.tld/", nil)) + rt1 := tt.roundTrip(req1) + tc1 := tt.getConn() + tc1.wantFrameType(FrameSettings) + tc1.wantFrameType(FrameWindowUpdate) + hf1 := readFrame[*HeadersFrame](t, tc1) + tc1.writeSettings(Setting{SettingMaxConcurrentStreams, maxConcurrent}) + tc1.wantFrameType(FrameSettings) // ack + tc1.writeHeaders(HeadersFrameParam{ + StreamID: hf1.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: tc1.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt1.wantStatus(200) + rt1.response().Body.Close() + + // Send more requests. + // None receive a response. + // Each is canceled. + for i := 0; i < maxConcurrent; i++ { + t.Logf("request %v receives no response and is canceled", i) + ctx, cancel := context.WithCancel(context.Background()) + req := must(http.NewRequestWithContext(ctx, "GET", "https://dummy.tld/", nil)) + tt.roundTrip(req) + if tt.hasConn() { + t.Fatalf("new connection created; expect existing conn to be reused") + } + tc1.wantFrameType(FrameHeaders) + cancel() + tc1.wantFrameType(FrameRSTStream) + if i == 0 { + tc1.wantFrameType(FramePing) + } + tc1.wantIdle() + } + + // The conn has hit its concurrency limit. + // The next request is sent on a new conn. + req2 := must(http.NewRequest("GET", "https://dummy.tld/", nil)) + rt2 := tt.roundTrip(req2) + tc2 := tt.getConn() + tc2.wantFrameType(FrameSettings) + tc2.wantFrameType(FrameWindowUpdate) + hf := readFrame[*HeadersFrame](t, tc2) + tc2.writeSettings(Setting{SettingMaxConcurrentStreams, maxConcurrent}) + tc2.wantFrameType(FrameSettings) // ack + tc2.writeHeaders(HeadersFrameParam{ + StreamID: hf.StreamID, + EndHeaders: true, + EndStream: true, + BlockFragment: tc2.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt2.wantStatus(200) + rt2.response().Body.Close() +} + +// Test that the Transport can use a conn provided to it by a TLSNextProto hook. +func TestTransportTLSNextProtoConnOK(t *testing.T) { + t1 := &http.Transport{} + t2, _ := ConfigureTransports(t1) + tt := newTestTransport(t, t2) + + // Create a new, fake connection and pass it to the Transport via the TLSNextProto hook. + cli, _ := synctestNetPipe(tt.group) + cliTLS := tls.Client(cli, tlsConfigInsecure) + go func() { + tt.group.Join() + t1.TLSNextProto["h2"]("dummy.tld", cliTLS) + }() + tt.sync() + tc := tt.getConn() + tc.greet() + + // Send a request on the Transport. + // It uses the conn we provided. + req := must(http.NewRequest("GET", "https://dummy.tld/", nil)) + rt := tt.roundTrip(req) + tc.wantHeaders(wantHeader{ + streamID: 1, + endStream: true, + header: http.Header{ + ":authority": []string{"dummy.tld"}, + ":method": []string{"GET"}, + ":path": []string{"/"}, + }, + }) + tc.writeHeaders(HeadersFrameParam{ + StreamID: 1, + EndHeaders: true, + EndStream: true, + BlockFragment: tc.makeHeaderBlockFragment( + ":status", "200", + ), + }) + rt.wantStatus(200) + rt.wantBody(nil) +} + +// Test the case where a conn provided via a TLSNextProto hook immediately encounters an error. +func TestTransportTLSNextProtoConnImmediateFailureUsed(t *testing.T) { + t1 := &http.Transport{} + t2, _ := ConfigureTransports(t1) + tt := newTestTransport(t, t2) + + // Create a new, fake connection and pass it to the Transport via the TLSNextProto hook. 
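+ // synctestNetPipe returns an in-memory connection pair; wrapping the client
+ // side in tls.Client gives the hook the *tls.Conn value it expects.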
+ cli, _ := synctestNetPipe(tt.group) + cliTLS := tls.Client(cli, tlsConfigInsecure) + go func() { + tt.group.Join() + t1.TLSNextProto["h2"]("dummy.tld", cliTLS) + }() + tt.sync() + tc := tt.getConn() + + // The connection encounters an error before we send a request that uses it. + tc.closeWrite() + + // Send a request on the Transport. + // + // It should fail, because we have no usable connections, but not with ErrNoCachedConn. + req := must(http.NewRequest("GET", "https://dummy.tld/", nil)) + rt := tt.roundTrip(req) + if err := rt.err(); err == nil || errors.Is(err, ErrNoCachedConn) { + t.Fatalf("RoundTrip with broken conn: got %v, want an error other than ErrNoCachedConn", err) + } + + // Send the request again. + // This time it should fail with ErrNoCachedConn, + // because the dead conn has been removed from the pool. + rt = tt.roundTrip(req) + if err := rt.err(); !errors.Is(err, ErrNoCachedConn) { + t.Fatalf("RoundTrip after broken conn is used: got %v, want ErrNoCachedConn", err) + } +} + +// Test the case where a conn provided via a TLSNextProto hook immediately encounters an error, +// but no requests are sent which would use the bad connection. +func TestTransportTLSNextProtoConnImmediateFailureUnused(t *testing.T) { + t1 := &http.Transport{} + t2, _ := ConfigureTransports(t1) + tt := newTestTransport(t, t2) + + // Create a new, fake connection and pass it to the Transport via the TLSNextProto hook. + cli, _ := synctestNetPipe(tt.group) + cliTLS := tls.Client(cli, tlsConfigInsecure) + go func() { + tt.group.Join() + t1.TLSNextProto["h2"]("dummy.tld", cliTLS) + }() + tt.sync() + tc := tt.getConn() + + // The connection encounters an error before we send a request that uses it. + tc.closeWrite() + + // Some time passes. + // The dead connection is removed from the pool. + tc.advance(10 * time.Second) + + // Send a request on the Transport. + // + // It should fail with ErrNoCachedConn, because the pool contains no conns. 
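+ // (Unlike the test above, no request ever observed the broken conn, so even
+ // the first RoundTrip sees an empty pool.)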
+ req := must(http.NewRequest("GET", "https://dummy.tld/", nil)) + rt := tt.roundTrip(req) + if err := rt.err(); !errors.Is(err, ErrNoCachedConn) { + t.Fatalf("RoundTrip after broken conn expires: got %v, want ErrNoCachedConn", err) + } +} + +func TestExtendedConnectClientWithServerSupport(t *testing.T) { + disableExtendedConnectProtocol = false + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get(":protocol") != "extended-connect" { + t.Fatalf("unexpected :protocol header received") + } + t.Log(io.Copy(w, r.Body)) + }) + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + AllowHTTP: true, + } + defer tr.CloseIdleConnections() + pr, pw := io.Pipe() + pwDone := make(chan struct{}) + req, _ := http.NewRequest("CONNECT", ts.URL, pr) + req.Header.Set(":protocol", "extended-connect") + go func() { + pw.Write([]byte("hello, extended connect")) + pw.Close() + close(pwDone) + }() + + res, err := tr.RoundTrip(req) + if err != nil { + t.Fatal(err) + } + body, err := io.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(body, []byte("hello, extended connect")) { + t.Fatal("unexpected body received") + } +} + +func TestExtendedConnectClientWithoutServerSupport(t *testing.T) { + disableExtendedConnectProtocol = true + ts := newTestServer(t, func(w http.ResponseWriter, r *http.Request) { + io.Copy(w, r.Body) + }) + tr := &Transport{ + TLSClientConfig: tlsConfigInsecure, + AllowHTTP: true, + } + defer tr.CloseIdleConnections() + pr, pw := io.Pipe() + pwDone := make(chan struct{}) + req, _ := http.NewRequest("CONNECT", ts.URL, pr) + req.Header.Set(":protocol", "extended-connect") + go func() { + pw.Write([]byte("hello, extended connect")) + pw.Close() + close(pwDone) + }() + + _, err := tr.RoundTrip(req) + if !errors.Is(err, errExtendedConnectNotSupported) { + t.Fatalf("expected error errExtendedConnectNotSupported, got: %v", err) + } +} diff --git a/pkg/http2/unencrypted.go b/pkg/http2/unencrypted.go new file mode 100644 index 0000000..b2de211 --- /dev/null +++ b/pkg/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. 
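+//
+// A conn suitable for this handoff can be produced by a trivial wrapper along
+// these lines (the wrapper name is illustrative, not part of this package):
+//
+//	type unencryptedConn struct{ net.Conn }
+//
+//	func (c unencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }
+//
+// If the assertion below fails, a real TLS connection reached the unencrypted
+// handoff by mistake, and we return an error rather than silently dropping
+// encryption.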
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/pkg/http2/write.go b/pkg/http2/write.go index 33f6139..6ff6bee 100644 --- a/pkg/http2/write.go +++ b/pkg/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/pkg/http2/writesched_priority.go b/pkg/http2/writesched_priority.go index 0a242c6..f678333 100644 --- a/pkg/http2/writesched_priority.go +++ b/pkg/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/pkg/http2/writesched_priority_test.go b/pkg/http2/writesched_priority_test.go index b579ef9..5aad057 100644 --- a/pkg/http2/writesched_priority_test.go +++ b/pkg/http2/writesched_priority_test.go @@ -562,3 +562,37 @@ func TestPriorityRstStreamOnNonOpenStreams(t *testing.T) { t.Error(err) } } + +// https://go.dev/issue/66514 +func TestPriorityIssue66514(t *testing.T) { + addDep := func(ws *priorityWriteScheduler, child uint32, parent uint32) { + ws.AdjustStream(child, PriorityParam{ + StreamDep: parent, + Exclusive: false, + Weight: 16, + }) + } + + validateDepTree := func(ws *priorityWriteScheduler, id uint32, t *testing.T) { + for n := ws.nodes[id]; n != nil; n = n.parent { + if n.parent == nil { + if n.id != uint32(0) { + t.Errorf("detected nodes not parented to 0") + } + } + } + } + + ws := NewPriorityWriteScheduler(nil).(*priorityWriteScheduler) + + // Root entry + addDep(ws, uint32(1), uint32(0)) + addDep(ws, uint32(3), uint32(1)) + addDep(ws, uint32(5), uint32(1)) + + for id := uint32(7); id < uint32(100); id += uint32(4) { + addDep(ws, id, id-uint32(4)) + addDep(ws, id+uint32(2), id-uint32(4)) + validateDepTree(ws, id, t) + } +} diff --git a/pkg/http2/z_spec_test.go b/pkg/http2/z_spec_test.go deleted file mode 100644 index 610b2cd..0000000 --- a/pkg/http2/z_spec_test.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "encoding/xml" - "flag" - "fmt" - "io" - "os" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "sync" - "testing" -) - -var coverSpec = flag.Bool("coverspec", false, "Run spec coverage tests") - -// The global map of sentence coverage for the http2 spec. 
-var defaultSpecCoverage specCoverage - -var loadSpecOnce sync.Once - -func loadSpec() { - if f, err := os.Open("testdata/draft-ietf-httpbis-http2.xml"); err != nil { - panic(err) - } else { - defaultSpecCoverage = readSpecCov(f) - f.Close() - } -} - -// covers marks all sentences for section sec in defaultSpecCoverage. Sentences not -// "covered" will be included in report outputted by TestSpecCoverage. -func covers(sec, sentences string) { - loadSpecOnce.Do(loadSpec) - defaultSpecCoverage.cover(sec, sentences) -} - -type specPart struct { - section string - sentence string -} - -func (ss specPart) Less(oo specPart) bool { - atoi := func(s string) int { - n, err := strconv.Atoi(s) - if err != nil { - panic(err) - } - return n - } - a := strings.Split(ss.section, ".") - b := strings.Split(oo.section, ".") - for len(a) > 0 { - if len(b) == 0 { - return false - } - x, y := atoi(a[0]), atoi(b[0]) - if x == y { - a, b = a[1:], b[1:] - continue - } - return x < y - } - if len(b) > 0 { - return true - } - return false -} - -type bySpecSection []specPart - -func (a bySpecSection) Len() int { return len(a) } -func (a bySpecSection) Less(i, j int) bool { return a[i].Less(a[j]) } -func (a bySpecSection) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type specCoverage struct { - coverage map[specPart]bool - d *xml.Decoder -} - -func joinSection(sec []int) string { - s := fmt.Sprintf("%d", sec[0]) - for _, n := range sec[1:] { - s = fmt.Sprintf("%s.%d", s, n) - } - return s -} - -func (sc specCoverage) readSection(sec []int) { - var ( - buf = new(bytes.Buffer) - sub = 0 - ) - for { - tk, err := sc.d.Token() - if err != nil { - if err == io.EOF { - return - } - panic(err) - } - switch v := tk.(type) { - case xml.StartElement: - if skipElement(v) { - if err := sc.d.Skip(); err != nil { - panic(err) - } - if v.Name.Local == "section" { - sub++ - } - break - } - switch v.Name.Local { - case "section": - sub++ - sc.readSection(append(sec, sub)) - case "xref": - buf.Write(sc.readXRef(v)) - } - case xml.CharData: - if len(sec) == 0 { - break - } - buf.Write(v) - case xml.EndElement: - if v.Name.Local == "section" { - sc.addSentences(joinSection(sec), buf.String()) - return - } - } - } -} - -func (sc specCoverage) readXRef(se xml.StartElement) []byte { - var b []byte - for { - tk, err := sc.d.Token() - if err != nil { - panic(err) - } - switch v := tk.(type) { - case xml.CharData: - if b != nil { - panic("unexpected CharData") - } - b = []byte(string(v)) - case xml.EndElement: - if v.Name.Local != "xref" { - panic("expected ") - } - if b != nil { - return b - } - sig := attrSig(se) - switch sig { - case "target": - return []byte(fmt.Sprintf("[%s]", attrValue(se, "target"))) - case "fmt-of,rel,target", "fmt-,,rel,target": - return []byte(fmt.Sprintf("[%s, %s]", attrValue(se, "target"), attrValue(se, "rel"))) - case "fmt-of,sec,target", "fmt-,,sec,target": - return []byte(fmt.Sprintf("[section %s of %s]", attrValue(se, "sec"), attrValue(se, "target"))) - case "fmt-of,rel,sec,target": - return []byte(fmt.Sprintf("[section %s of %s, %s]", attrValue(se, "sec"), attrValue(se, "target"), attrValue(se, "rel"))) - default: - panic(fmt.Sprintf("unknown attribute signature %q in %#v", sig, fmt.Sprintf("%#v", se))) - } - default: - panic(fmt.Sprintf("unexpected tag %q", v)) - } - } -} - -var skipAnchor = map[string]bool{ - "intro": true, - "Overview": true, -} - -var skipTitle = map[string]bool{ - "Acknowledgements": true, - "Change Log": true, - "Document Organization": true, - "Conventions and Terminology": true, -} - 
-func skipElement(s xml.StartElement) bool { - switch s.Name.Local { - case "artwork": - return true - case "section": - for _, attr := range s.Attr { - switch attr.Name.Local { - case "anchor": - if skipAnchor[attr.Value] || strings.HasPrefix(attr.Value, "changes.since.") { - return true - } - case "title": - if skipTitle[attr.Value] { - return true - } - } - } - } - return false -} - -func readSpecCov(r io.Reader) specCoverage { - sc := specCoverage{ - coverage: map[specPart]bool{}, - d: xml.NewDecoder(r)} - sc.readSection(nil) - return sc -} - -func (sc specCoverage) addSentences(sec string, sentence string) { - for _, s := range parseSentences(sentence) { - sc.coverage[specPart{sec, s}] = false - } -} - -func (sc specCoverage) cover(sec string, sentence string) { - for _, s := range parseSentences(sentence) { - p := specPart{sec, s} - if _, ok := sc.coverage[p]; !ok { - panic(fmt.Sprintf("Not found in spec: %q, %q", sec, s)) - } - sc.coverage[specPart{sec, s}] = true - } - -} - -var whitespaceRx = regexp.MustCompile(`\s+`) - -func parseSentences(sens string) []string { - sens = strings.TrimSpace(sens) - if sens == "" { - return nil - } - ss := strings.Split(whitespaceRx.ReplaceAllString(sens, " "), ". ") - for i, s := range ss { - s = strings.TrimSpace(s) - if !strings.HasSuffix(s, ".") { - s += "." - } - ss[i] = s - } - return ss -} - -func TestSpecParseSentences(t *testing.T) { - tests := []struct { - ss string - want []string - }{ - {"Sentence 1. Sentence 2.", - []string{ - "Sentence 1.", - "Sentence 2.", - }}, - {"Sentence 1. \nSentence 2.\tSentence 3.", - []string{ - "Sentence 1.", - "Sentence 2.", - "Sentence 3.", - }}, - } - - for i, tt := range tests { - got := parseSentences(tt.ss) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("%d: got = %q, want %q", i, got, tt.want) - } - } -} - -func TestSpecCoverage(t *testing.T) { - if !*coverSpec { - t.Skip() - } - - loadSpecOnce.Do(loadSpec) - - var ( - list []specPart - cv = defaultSpecCoverage.coverage - total = len(cv) - complete = 0 - ) - - for sp, touched := range defaultSpecCoverage.coverage { - if touched { - complete++ - } else { - list = append(list, sp) - } - } - sort.Stable(bySpecSection(list)) - - if testing.Short() && len(list) > 5 { - list = list[:5] - } - - for _, p := range list { - t.Errorf("\tSECTION %s: %s", p.section, p.sentence) - } - - t.Logf("%d/%d (%d%%) sentences covered", complete, total, (complete/total)*100) -} - -func attrSig(se xml.StartElement) string { - var names []string - for _, attr := range se.Attr { - if attr.Name.Local == "fmt" { - names = append(names, "fmt-"+attr.Value) - } else { - names = append(names, attr.Name.Local) - } - } - sort.Strings(names) - return strings.Join(names, ",") -} - -func attrValue(se xml.StartElement, attr string) string { - for _, a := range se.Attr { - if a.Name.Local == attr { - return a.Value - } - } - panic("unknown attribute " + attr) -} - -func TestSpecPartLess(t *testing.T) { - tests := []struct { - sec1, sec2 string - want bool - }{ - {"6.2.1", "6.2", false}, - {"6.2", "6.2.1", true}, - {"6.10", "6.10.1", true}, - {"6.10", "6.1.1", false}, // 10, not 1 - {"6.1", "6.1", false}, // equal, so not less - } - for _, tt := range tests { - got := (specPart{tt.sec1, "foo"}).Less(specPart{tt.sec2, "foo"}) - if got != tt.want { - t.Errorf("Less(%q, %q) = %v; want %v", tt.sec1, tt.sec2, got, tt.want) - } - } -} diff --git a/pkg/sync-http2-pkg.sh b/pkg/sync-http2-pkg.sh index ceea83b..70940ce 100755 --- a/pkg/sync-http2-pkg.sh +++ b/pkg/sync-http2-pkg.sh @@ -6,7 +6,7 @@ 
cd $(dirname "$0") -TAG=v0.21.0 +TAG=v0.33.0 TAG_ARCHIVE_FILENAME=$TAG.tar.gz LOCAL_ARCHIVE_FILENAME=/tmp/$TAG_ARCHIVE_FILENAME