[proxy] rewrote chunked response handler
1) We cannot send "Connection: close", because the fsouza Docker client
   expects the TCP socket to stay open between requests.

2) Because we cannot force-close the connection, we can't hijack the
   connection (because Go's net/http doesn't let us un-hijack it).

3) Because we need to maintain the individual chunking of messages (for
   docker-py), we can't just copy the response body, as Go will strip and
   re-apply the chunking at its own buffer boundaries.

Therefore, we have to read each chunk one-by-one, and flush the
ResponseWriter after each one, as sketched below.
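
For context, the fix described above might look roughly like the following.
This is a minimal sketch, not code from this commit: copyChunked is a
hypothetical name, and the sketch assumes the ChunkedReader API introduced
below in proxy/chunked.go.

package proxy

import (
	"io"
	"net/http"
)

// copyChunked copies a chunked upstream body to the client one chunk at a
// time, flushing the ResponseWriter after each chunk so that the original
// chunk boundaries survive.
func copyChunked(w http.ResponseWriter, upstream io.Reader) error {
	cr := NewChunkedReader(upstream)
	for cr.Next() {
		if _, err := io.Copy(w, cr.Chunk()); err != nil {
			return err
		}
		// net/http re-chunks whatever we write; flushing forces each
		// upstream chunk out as its own chunk on the wire.
		if f, ok := w.(http.Flusher); ok {
			f.Flush()
		}
	}
	return cr.Err()
}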
paulbellamy committed Jul 10, 2015
1 parent f21114e commit de7efa8
Showing 3 changed files with 155 additions and 262 deletions.
184 changes: 64 additions & 120 deletions proxy/chunked.go
@@ -1,105 +1,98 @@
 // Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

 // The wire protocol for HTTP's "chunked" Transfer-Encoding.

 // Package internal contains HTTP internals shared by net/http and
 // net/http/httputil.
+// Based on net/http/internal
 package proxy

 import (
 	"bufio"
-	"bytes"
 	"errors"
-	"fmt"
 	"io"
+	"io/ioutil"
 )

 const maxLineLength = 4096 // assumed <= bufio.defaultBufSize

 var ErrLineTooLong = errors.New("header line too long")

 // NewChunkedReader returns a new chunkedReader that translates the data read from r
 // out of HTTP "chunked" format before returning it.
 // The chunkedReader returns io.EOF when the final 0-length chunk is read.
 //
-// NewChunkedReader is not needed by normal applications. The http package
-// automatically decodes chunking when reading response bodies.
-func NewChunkedReader(r io.Reader) io.Reader {
+// Unlike net/http/internal.chunkedReader, this has an interface where we can
+// handle individual chunks. The interface is based on database/sql.Rows.
+func NewChunkedReader(r io.Reader) *ChunkedReader {
 	br, ok := r.(*bufio.Reader)
 	if !ok {
 		br = bufio.NewReader(r)
 	}
-	return &chunkedReader{r: br}
+	return &ChunkedReader{r: br}
 }

-type chunkedReader struct {
-	r   *bufio.Reader
-	n   uint64 // unread bytes in chunk
-	err error
-	buf [2]byte
+type ChunkedReader struct {
+	r     *bufio.Reader
+	chunk *io.LimitedReader
+	err   error
+	buf   [2]byte
 }

-func (cr *chunkedReader) beginChunk() {
-	// chunk-size CRLF
-	var line []byte
-	line, cr.err = readLine(cr.r)
+// Next prepares the next chunk for reading. It returns true on success, or
+// false if there is no next chunk or an error happened while preparing
+// it. Err should be consulted to distinguish between the two cases.
+//
+// Every call to Chunk, even the first one, must be preceded by a call to Next.
+//
+// Calls to Next will discard any unread bytes in the current Chunk.
+func (cr *ChunkedReader) Next() bool {
 	if cr.err != nil {
-		return
+		return false
 	}
-	cr.n, cr.err = parseHexUint(line)
-	if cr.err != nil {
-		return
+
+	// Check the termination of the previous chunk
+	if cr.chunk != nil {
+		// Make sure the remainder is drained, in case the user of this quit
+		// reading early.
+		if _, cr.err = io.Copy(ioutil.Discard, cr.chunk); cr.err != nil {
+			return false
+		}
+
+		// Check the next two bytes after the chunk are \r\n
+		if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err != nil {
+			return false
+		}
+		if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
+			cr.err = errors.New("malformed chunked encoding")
+			return false
+		}
+	} else {
+		cr.chunk = &io.LimitedReader{R: cr.r}
 	}
-	if cr.n == 0 {
+
+	// Setup the next chunk
+	if n := cr.beginChunk(); n > 0 {
+		cr.chunk.N = int64(n)
+	} else {
 		cr.err = io.EOF
 	}
+	return cr.err == nil
+}
+
+// Chunk returns the io.Reader of the current chunk. On each call, this returns
+// the same io.Reader for a given chunk.
+func (cr *ChunkedReader) Chunk() io.Reader {
+	return cr.chunk
 }

-func (cr *chunkedReader) chunkHeaderAvailable() bool {
-	n := cr.r.Buffered()
-	if n > 0 {
-		peek, _ := cr.r.Peek(n)
-		return bytes.IndexByte(peek, '\n') >= 0
+// Err returns the error, if any, that was encountered during iteration.
+func (cr *ChunkedReader) Err() error {
+	if cr.err == io.EOF {
+		return nil
 	}
-	return false
+	return cr.err
 }

-func (cr *chunkedReader) Read(b []uint8) (n int, err error) {
-	for cr.err == nil {
-		if cr.n == 0 {
-			if n > 0 && !cr.chunkHeaderAvailable() {
-				// We've read enough. Don't potentially block
-				// reading a new chunk header.
-				break
-			}
-			cr.beginChunk()
-			continue
-		}
-		if len(b) == 0 {
-			break
-		}
-		rbuf := b
-		if uint64(len(rbuf)) > cr.n {
-			rbuf = rbuf[:cr.n]
-		}
-		var n0 int
-		n0, cr.err = cr.r.Read(rbuf)
-		n += n0
-		b = b[n0:]
-		cr.n -= uint64(n0)
-		// If we're at the end of a chunk, read the next two
-		// bytes to verify they are "\r\n".
-		if cr.n == 0 && cr.err == nil {
-			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
-				if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
-					cr.err = errors.New("malformed chunked encoding")
-				}
-			}
-		}
-	}
-	return n, cr.err
-}
+func (cr *ChunkedReader) beginChunk() (n uint64) {
+	// chunk-size CRLF
+	var line []byte
+	line, cr.err = readLine(cr.r)
+	if cr.err != nil {
+		return
+	}
+	n, cr.err = parseHexUint(line)
+	return
+}

 // Read a line of bytes (up to \n) from b.
@@ -134,55 +127,6 @@ func isASCIISpace(b byte) bool {
 	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
 }

-// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned chunkedWriter
-// sends the final 0-length chunk that marks the end of the stream.
-//
-// NewChunkedWriter is not needed by normal applications. The http
-// package adds chunking automatically if handlers don't set a
-// Content-Length header. Using newChunkedWriter inside a handler
-// would result in double chunking or chunking with a Content-Length
-// length, both of which are wrong.
-func NewChunkedWriter(w io.Writer) io.WriteCloser {
-	return &chunkedWriter{w}
-}
-
-// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
-// Encoding wire format to the underlying Wire chunkedWriter.
-type chunkedWriter struct {
-	Wire io.Writer
-}
-
-// Write the contents of data as one chunk to Wire.
-// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has
-// a bug since it does not check for success of io.WriteString
-func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
-
-	// Don't send 0-length data. It looks like EOF for chunked encoding.
-	if len(data) == 0 {
-		return 0, nil
-	}
-
-	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
-		return 0, err
-	}
-	if n, err = cw.Wire.Write(data); err != nil {
-		return
-	}
-	if n != len(data) {
-		err = io.ErrShortWrite
-		return
-	}
-	_, err = io.WriteString(cw.Wire, "\r\n")
-
-	return
-}
-
-func (cw *chunkedWriter) Close() error {
-	_, err := io.WriteString(cw.Wire, "0\r\n")
-	return err
-}

 func parseHexUint(v []byte) (n uint64, err error) {
 	for _, b := range v {
 		n <<= 4
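
For reference, the wire format this reader parses is a hex chunk size
followed by CRLF, the chunk data, and a trailing CRLF, terminated by a
0-length chunk. The example below is illustrative only; the function name
and input bytes are invented, not part of the commit.

package proxy

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func ExampleChunkedReader() {
	// Invented wire data: two chunks ("foo" and "barbazq"), then the
	// terminating 0-length chunk.
	body := strings.NewReader("3\r\nfoo\r\n7\r\nbarbazq\r\n0\r\n\r\n")

	cr := NewChunkedReader(body)
	for cr.Next() {
		data, _ := ioutil.ReadAll(cr.Chunk())
		fmt.Printf("%q\n", data)
	}
	if err := cr.Err(); err != nil {
		fmt.Println("read error:", err)
	}
	// Output:
	// "foo"
	// "barbazq"
}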