Commit 59e870b2 authored by Brad Fitzpatrick

http2, http2/hpack: add limit on sum of header block fragments

Fixes golang/go#12916

Change-Id: I085ebc1d3f851d7c1b689a715c4f2fe85be4608a
Reviewed-on: https://go-review.googlesource.com/15821
Reviewed-by: Andrew Gerrand <adg@golang.org>
parent c94bffa2
...@@ -65,12 +65,16 @@ type Decoder struct { ...@@ -65,12 +65,16 @@ type Decoder struct {
emit func(f HeaderField) emit func(f HeaderField)
emitEnabled bool // whether calls to emit are enabled emitEnabled bool // whether calls to emit are enabled
maxStrLen int // 0 means unlimited
// buf is the unparsed buffer. It's only written to // buf is the unparsed buffer. It's only written to
// saveBuf if it was truncated in the middle of a header // saveBuf if it was truncated in the middle of a header
// block. Because it's usually not owned, we can only // block. Because it's usually not owned, we can only
// process it under Write. // process it under Write.
buf []byte // usually not owned buf []byte // not owned; only valid during Write
// saveBuf is previous data passed to Write which we weren't able
// to fully parse before. Unlike buf, we own this data.
saveBuf bytes.Buffer saveBuf bytes.Buffer
} }
...@@ -87,6 +91,18 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod ...@@ -87,6 +91,18 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
return d return d
} }
// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
// It is a sentinel value; callers compare against it directly.
var ErrStringLength = errors.New("hpack: string too long")
// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string, after compression. If a string exceeds this length,
// Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
//
// This is a defense against a peer sending absurdly large header
// strings; it bounds memory growth per decoded field.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder // SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true. // should be called. The default is true.
// //
...@@ -269,6 +285,11 @@ func (d *Decoder) Write(p []byte) (n int, err error) { ...@@ -269,6 +285,11 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
if err != nil { if err != nil {
if err == errNeedMore { if err == errNeedMore {
err = nil err = nil
const varIntOverhead = 8 // conservative
if d.maxStrLen != 0 &&
int64(len(d.buf))+int64(d.saveBuf.Len()) > 2*(int64(d.maxStrLen)+varIntOverhead) {
return 0, ErrStringLength
}
d.saveBuf.Write(d.buf) d.saveBuf.Write(d.buf)
} }
break break
...@@ -341,9 +362,8 @@ func (d *Decoder) parseFieldIndexed() error { ...@@ -341,9 +362,8 @@ func (d *Decoder) parseFieldIndexed() error {
if !ok { if !ok {
return DecodingError{InvalidIndexError(idx)} return DecodingError{InvalidIndexError(idx)}
} }
d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
d.buf = buf d.buf = buf
return nil return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
} }
// (same invariants and behavior as parseHeaderFieldRepr) // (same invariants and behavior as parseHeaderFieldRepr)
...@@ -377,14 +397,19 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { ...@@ -377,14 +397,19 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
d.dynTab.add(hf) d.dynTab.add(hf)
} }
hf.Sensitive = it.sensitive() hf.Sensitive = it.sensitive()
d.callEmit(hf) return d.callEmit(hf)
return nil
} }
func (d *Decoder) callEmit(hf HeaderField) { func (d *Decoder) callEmit(hf HeaderField) error {
if d.maxStrLen != 0 {
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
return ErrStringLength
}
}
if d.emitEnabled { if d.emitEnabled {
d.emit(hf) d.emit(hf)
} }
return nil
} }
// (same invariants and behavior as parseHeaderFieldRepr) // (same invariants and behavior as parseHeaderFieldRepr)
......
...@@ -756,3 +756,36 @@ func TestEmitEnabled(t *testing.T) { ...@@ -756,3 +756,36 @@ func TestEmitEnabled(t *testing.T) {
t.Errorf("emit enabled = true; want false") t.Errorf("emit enabled = true; want false")
} }
} }
// TestSaveBufLimit verifies that the decoder refuses to buffer an
// unbounded amount of unparsed header data once a max string length is
// configured: a small complete field is emitted normally, while a huge
// truncated field makes Write fail with ErrStringLength.
func TestSaveBufLimit(t *testing.T) {
	const maxStr = 1 << 10
	var emitted []HeaderField
	dec := NewDecoder(initialHeaderTableSize, func(hf HeaderField) {
		emitted = append(emitted, hf)
	})
	dec.SetMaxStringLength(maxStr)

	// A small, well-formed literal field: name "foo", value "bar".
	small := []byte{encodeTypeByte(false, false)}
	small = appendVarInt(small, 7, 3)
	small = append(small, "foo"...)
	small = appendVarInt(small, 7, 3)
	small = append(small, "bar"...)
	if _, err := dec.Write(small); err != nil {
		t.Fatal(err)
	}
	if want := []HeaderField{{Name: "foo", Value: "bar"}}; !reflect.DeepEqual(emitted, want) {
		t.Errorf("After small writes, got %v; want %v", emitted, want)
	}

	// A field whose declared string length is far past the limit; the
	// decoder must reject it instead of buffering it in saveBuf.
	big := []byte{encodeTypeByte(false, false)}
	big = appendVarInt(big, 7, maxStr*3)
	big = append(big, make([]byte, maxStr*3)...)
	if _, err := dec.Write(big); err != ErrStringLength {
		t.Fatalf("Write error = %v; want ErrStringLength", err)
	}
}
...@@ -219,6 +219,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) { ...@@ -219,6 +219,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
sc.inflow.add(initialWindowSize) sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField) sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField)
sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen())
fr := NewFramer(sc.bw, c) fr := NewFramer(sc.bw, c)
fr.SetMaxReadFrameSize(srv.maxReadFrameSize()) fr.SetMaxReadFrameSize(srv.maxReadFrameSize())
...@@ -358,6 +359,16 @@ type serverConn struct { ...@@ -358,6 +359,16 @@ type serverConn struct {
hpackEncoder *hpack.Encoder hpackEncoder *hpack.Encoder
} }
// maxHeaderStringLen returns the per-string length cap to install on
// the connection's hpack.Decoder, derived from the configured max
// header list size. A return of 0 means "no limit".
func (sc *serverConn) maxHeaderStringLen() int {
	limit := sc.maxHeaderListSize()
	if n := int(limit); uint32(n) == limit {
		return n
	}
	// The configured MaxHeaderBytes is so large it overflows int on
	// this platform, so fall back to unlimited string lengths.
	return 0
}
func (sc *serverConn) maxHeaderListSize() uint32 { func (sc *serverConn) maxHeaderListSize() uint32 {
n := sc.hs.MaxHeaderBytes n := sc.hs.MaxHeaderBytes
if n <= 0 { if n <= 0 {
...@@ -1275,15 +1286,13 @@ func (sc *serverConn) processContinuation(f *ContinuationFrame) error { ...@@ -1275,15 +1286,13 @@ func (sc *serverConn) processContinuation(f *ContinuationFrame) error {
func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error { func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error {
sc.serveG.check() sc.serveG.check()
if _, err := sc.hpackDecoder.Write(frag); err != nil { if _, err := sc.hpackDecoder.Write(frag); err != nil {
// TODO: convert to stream error I assume? return ConnectionError(ErrCodeCompression)
return err
} }
if !end { if !end {
return nil return nil
} }
if err := sc.hpackDecoder.Close(); err != nil { if err := sc.hpackDecoder.Close(); err != nil {
// TODO: convert to stream error I assume? return ConnectionError(ErrCodeCompression)
return err
} }
defer sc.resetPendingRequest() defer sc.resetPendingRequest()
if sc.curOpenStreams > sc.advMaxStreams { if sc.curOpenStreams > sc.advMaxStreams {
......
...@@ -2345,6 +2345,75 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) { ...@@ -2345,6 +2345,75 @@ func TestServerDoS_MaxHeaderListSize(t *testing.T) {
} }
} }
// TestCompressionErrorOnWrite checks that a header string exactly at
// the server's limit is accepted, while one byte more tears down the
// connection with a COMPRESSION_ERROR GOAWAY.
func TestCompressionErrorOnWrite(t *testing.T) {
	const maxStrLen = 8 << 10
	var serverConfig *http.Server
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		// No response body.
	}, func(ts *httptest.Server) {
		serverConfig = ts.Config
		serverConfig.MaxHeaderBytes = maxStrLen
	})
	defer st.Close()
	st.greet()

	maxAllowed := st.sc.maxHeaderStringLen()

	// The connection's hpack.Decoder already picked up its max string
	// length from the low ~8K value above. Raise the server limit now so
	// the requests below exercise only the per-string cap, not the max
	// header list size.
	serverConfig.MaxHeaderBytes = 1 << 20

	// First, a header value of exactly the maximum allowed size.
	st.writeHeaders(HeadersFrameParam{
		StreamID:      1,
		BlockFragment: st.encodeHeader("foo", strings.Repeat("a", maxAllowed)),
		EndStream:     true,
		EndHeaders:    true,
	})
	hframe := st.wantHeaders()
	if !hframe.HeadersEnded() || !hframe.StreamEnded() {
		t.Errorf("Unexpected HEADER frame %v", hframe)
	}

	// Then one that is a single byte too big.
	st.writeHeaders(HeadersFrameParam{
		StreamID:      3,
		BlockFragment: st.encodeHeader("bar", strings.Repeat("b", maxAllowed+1)),
		EndStream:     true,
		EndHeaders:    true,
	})
	if ga := st.wantGoAway(); ga.ErrCode != ErrCodeCompression {
		t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
	}
}
// TestCompressionErrorOnClose checks that a header block truncated
// mid-field (so hpack.Decoder.Close fails) produces a
// COMPRESSION_ERROR GOAWAY.
func TestCompressionErrorOnClose(t *testing.T) {
	st := newServerTester(t, func(w http.ResponseWriter, r *http.Request) {
		// No response body.
	})
	defer st.Close()
	st.greet()

	// Drop the last byte of an otherwise valid header block, leaving the
	// decoder mid-field when the block ends.
	block := st.encodeHeader("foo", "bar")
	st.writeHeaders(HeadersFrameParam{
		StreamID:      1,
		BlockFragment: block[:len(block)-1],
		EndStream:     true,
		EndHeaders:    true,
	})
	if ga := st.wantGoAway(); ga.ErrCode != ErrCodeCompression {
		t.Errorf("GOAWAY err = %v; want ErrCodeCompression", ga.ErrCode)
	}
}
func BenchmarkServerGets(b *testing.B) { func BenchmarkServerGets(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment