Skip to content

Commit

Permalink
http2, http2/hpack: add limit on sum of header block fragments
Browse files Browse the repository at this point in the history
Fixes golang/go#12916

Change-Id: I085ebc1d3f851d7c1b689a715c4f2fe85be4608a
Reviewed-on: https://go-review.googlesource.com/15821
Reviewed-by: Andrew Gerrand <adg@golang.org>
  • Loading branch information
bradfitz committed Oct 14, 2015
1 parent 9bdc309 commit 2183410
Show file tree
Hide file tree
Showing 4 changed files with 146 additions and 10 deletions.
37 changes: 31 additions & 6 deletions hpack/hpack.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,12 +65,16 @@ type Decoder struct {
emit func(f HeaderField)

emitEnabled bool // whether calls to emit are enabled
maxStrLen int // 0 means unlimited

// buf is the unparsed buffer. It's only written to
// saveBuf if it was truncated in the middle of a header
// block. Because it's usually not owned, we can only
// process it under Write.
buf []byte // usually not owned
buf []byte // not owned; only valid during Write

// saveBuf is previous data passed to Write which we weren't able
// to fully parse before. Unlike buf, we own this data.
saveBuf bytes.Buffer
}

Expand All @@ -87,6 +91,18 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
return d
}

// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")

// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string, after compression. If a string exceeds this length
// (in bytes), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}

// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
Expand Down Expand Up @@ -269,6 +285,11 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
if err != nil {
if err == errNeedMore {
err = nil
const varIntOverhead = 8 // conservative
if d.maxStrLen != 0 &&
int64(len(d.buf))+int64(d.saveBuf.Len()) > 2*(int64(d.maxStrLen)+varIntOverhead) {
return 0, ErrStringLength
}
d.saveBuf.Write(d.buf)
}
break
Expand Down Expand Up @@ -341,9 +362,8 @@ func (d *Decoder) parseFieldIndexed() error {
if !ok {
return DecodingError{InvalidIndexError(idx)}
}
d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
d.buf = buf
return nil
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
}

// (same invariants and behavior as parseHeaderFieldRepr)
Expand Down Expand Up @@ -377,14 +397,19 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
d.dynTab.add(hf)
}
hf.Sensitive = it.sensitive()
d.callEmit(hf)
return nil
return d.callEmit(hf)
}

func (d *Decoder) callEmit(hf HeaderField) {
func (d *Decoder) callEmit(hf HeaderField) error {
if d.maxStrLen != 0 {
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
return ErrStringLength
}
}
if d.emitEnabled {
d.emit(hf)
}
return nil
}

// (same invariants and behavior as parseHeaderFieldRepr)
Expand Down
33 changes: 33 additions & 0 deletions hpack/hpack_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -756,3 +756,36 @@ func TestEmitEnabled(t *testing.T) {
t.Errorf("emit enabled = true; want false")
}
}

// TestSaveBufLimit verifies that SetMaxStringLength bounds the
// Decoder's internal buffering: a small, well-formed header decodes
// normally, while a fragment declaring a string far over the limit
// makes Write return ErrStringLength instead of buffering it.
func TestSaveBufLimit(t *testing.T) {
	const maxStr = 1 << 10 // 1 KB string limit for this test
	var got []HeaderField
	dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {
		got = append(got, hf)
	})
	dec.SetMaxStringLength(maxStr)
	// Hand-build a literal header field "foo: bar": a type byte
	// (presumably literal, non-Huffman — see encodeTypeByte) followed
	// by length-prefixed name and value with a 7-bit length prefix.
	var frag []byte
	frag = append(frag[:0], encodeTypeByte(false, false))
	frag = appendVarInt(frag, 7, 3)
	frag = append(frag, "foo"...)
	frag = appendVarInt(frag, 7, 3)
	frag = append(frag, "bar"...)

	if _, err := dec.Write(frag); err != nil {
		t.Fatal(err)
	}

	want := []HeaderField{{Name: "foo", Value: "bar"}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("After small writes, got %v; want %v", got, want)
	}

	// Now a fragment whose declared string length is 3x the limit;
	// Write must reject it with ErrStringLength rather than
	// accumulating the oversized data.
	frag = append(frag[:0], encodeTypeByte(false, false))
	frag = appendVarInt(frag, 7, maxStr*3)
	frag = append(frag, make([]byte, maxStr*3)...)

	_, err := dec.Write(frag)
	if err != ErrStringLength {
		t.Fatalf("Write error = %v; want ErrStringLength", err)
	}
}
17 changes: 13 additions & 4 deletions server.go
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField)
sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen())

fr := NewFramer(sc.bw, c)
fr.SetMaxReadFrameSize(srv.maxReadFrameSize())
Expand Down Expand Up @@ -358,6 +359,16 @@ type serverConn struct {
hpackEncoder *hpack.Encoder
}

// maxHeaderStringLen returns the limit to impose on individual header
// name and value string lengths, derived from the server's max header
// list size. A return value of 0 means unlimited.
func (sc *serverConn) maxHeaderStringLen() int {
	v := sc.maxHeaderListSize()
	if n := int(v); uint32(n) == v {
		// The value round-trips through int without truncation,
		// so it is usable as a per-string limit.
		return n
	}
	// They had a crazy big number for MaxHeaderBytes anyway,
	// so give them unlimited header lengths:
	return 0
}

func (sc *serverConn) maxHeaderListSize() uint32 {
n := sc.hs.MaxHeaderBytes
if n <= 0 {
Expand Down Expand Up @@ -1275,15 +1286,13 @@ func (sc *serverConn) processContinuation(f *ContinuationFrame) error {
func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error {
sc.serveG.check()
if _, err := sc.hpackDecoder.Write(frag); err != nil {
// TODO: convert to stream error I assume?
return err
return ConnectionError(ErrCodeCompression)
}
if !end {
return nil
}
if err := sc.hpackDecoder.Close(); err != nil {
// TODO: convert to stream error I assume?
return err
return ConnectionError(ErrCodeCompression)
}
defer sc.resetPendingRequest()
if sc.curOpenStreams > sc.advMaxStreams {
Expand Down
69 changes: 69 additions & 0 deletions server_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2345,6 +2345,75 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) {
}
}

// TestCompressionErrorOnWrite verifies that a header string exceeding
// the server's per-string limit (derived from MaxHeaderBytes) makes
// hpack decoding fail during Write, and that the server answers with a
// connection-level GOAWAY carrying ErrCodeCompression.
func TestCompressionErrorOnWrite(t *testing.T) {
	const maxStrLen = 8 << 10
	var serverConfig *http.Server
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		// No response body.
	}, func(ts *httptest.Server) {
		serverConfig = ts.Config
		serverConfig.MaxHeaderBytes = maxStrLen
	})
	defer st.Close()
	st.greet()

	maxAllowed := st.sc.maxHeaderStringLen()

	// Crank this up now that the conn's hpack.Decoder has already had
	// its max string length initialized from the earlier low ~8K
	// value. We want this higher so we don't hit the max header list
	// size; we only want to test hitting the max string size.
	serverConfig.MaxHeaderBytes = 1 << 20

	// First a request with a header that's exactly the max allowed
	// size; this must decode successfully.
	hbf := st.encodeHeader("foo", strings.Repeat("a", maxAllowed))
	st.writeHeaders(HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbf,
		EndStream:     true,
		EndHeaders:    true,
	})
	h := st.wantHeaders()
	if !h.HeadersEnded() || !h.StreamEnded() {
		t.Errorf("Unexpected HEADER frame %v", h)
	}

	// And now send one that's just one byte too big; the server must
	// tear the connection down with a compression error.
	hbf = st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1))
	st.writeHeaders(HeadersFrameParam{
		StreamID:      3,
		BlockFragment: hbf,
		EndStream:     true,
		EndHeaders:    true,
	})
	ga := st.wantGoAway()
	if ga.ErrCode != ErrCodeCompression {
		t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
	}
}

// TestCompressionErrorOnClose verifies that a header block truncated
// mid-field — which makes hpack.Decoder.Close fail at end-of-headers —
// is treated as a connection error: the server must send a GOAWAY with
// ErrCodeCompression.
func TestCompressionErrorOnClose(t *testing.T) {
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		// No response body.
	})
	defer st.Close()
	st.greet()

	hbf := st.encodeHeader("foo", "bar")
	hbf = hbf[:len(hbf)-1] // truncate one byte from the end, so hpack.Decoder.Close fails.
	st.writeHeaders(HeadersFrameParam{
		StreamID:      1,
		BlockFragment: hbf,
		EndStream:     true,
		EndHeaders:    true,
	})
	ga := st.wantGoAway()
	if ga.ErrCode != ErrCodeCompression {
		t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
	}
}

func BenchmarkServerGets(b *testing.B) {
b.ReportAllocs()

Expand Down

0 comments on commit 2183410

Please sign in to comment.