Permalink
Browse files

Modernize web.go to make it more compatible with the current http package:

- Remove request.go, servefile.go, and cookie.go, which were duplicating a lot of functionality from Go's HTTP versions. I created them to fix bugs in Go's http libraries, which have since been fixed
- Remove web.Request, and use http.Request instead. Most of the custom functionality I needed has been added to http.Request
- Move Params to web.Context
- Create a wrapper for http.ResponseWriter that allows connections to be closed
- Use http.ServeFile and cgi.RequestFromMap (better performance and reliability)
- Lots of small fixes
  • Loading branch information...
1 parent 2f96b10 commit a6d87d9a0916232447bfbd90fcdef78bc451cfcc @hoisie committed Mar 23, 2012
Showing with 201 additions and 914 deletions.
  1. +0 −3 Makefile
  2. +0 −219 cookie.go
  3. +36 −37 fcgi.go
  4. +0 −339 request.go
  5. +31 −34 scgi.go
  6. +0 −107 servefile.go
  7. +65 −88 web.go
  8. +69 −87 web_test.go
View
@@ -1,11 +1,8 @@
GOFMT=gofmt -s -tabs=false -tabwidth=4
GOFILES=\
- cookie.go\
fcgi.go\
- request.go\
scgi.go\
- servefile.go\
status.go\
web.go\
View
219 cookie.go
@@ -1,219 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package web
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "sort"
- "strings"
- "time"
-)
-
-func sanitizeName(n string) string {
- n = strings.Replace(n, "\n", "-", -1)
- n = strings.Replace(n, "\r", "-", -1)
- return n
-}
-
-func sanitizeValue(v string) string {
- v = strings.Replace(v, "\n", " ", -1)
- v = strings.Replace(v, "\r", " ", -1)
- v = strings.Replace(v, ";", " ", -1)
- return v
-}
-
-func isCookieByte(c byte) bool {
- switch true {
- case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a,
- 0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e:
- return true
- }
- return false
-}
-
-func isSeparator(c byte) bool {
- switch c {
- case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
- return true
- }
- return false
-}
-func isChar(c byte) bool { return 0 <= c && c <= 127 }
-func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 }
-func isToken(c byte) bool { return isChar(c) && !isCtl(c) && !isSeparator(c) }
-
-func parseCookieValue(raw string) (string, bool) {
- raw = unquoteCookieValue(raw)
- for i := 0; i < len(raw); i++ {
- if !isCookieByte(raw[i]) {
- return "", false
- }
- }
- return raw, true
-}
-
-func unquoteCookieValue(v string) string {
- if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' {
- return v[1 : len(v)-1]
- }
- return v
-}
-
-func isCookieNameValid(raw string) bool {
- for _, c := range raw {
- if !isToken(byte(c)) {
- return false
- }
- }
- return true
-}
-
-// writeSetCookies writes the wire representation of the set-cookies
-// to w. Each cookie is written on a separate "Set-Cookie: " line.
-// This choice is made because HTTP parsers tend to have a limit on
-// line-length, so it seems safer to place cookies on separate lines.
-func writeSetCookies(w io.Writer, kk []*http.Cookie) error {
- if kk == nil {
- return nil
- }
- lines := make([]string, 0, len(kk))
- var b bytes.Buffer
- for _, c := range kk {
- b.Reset()
- // TODO(petar): c.Value (below) should be unquoted if it is recognized as quoted
- fmt.Fprintf(&b, "%s=%s", http.CanonicalHeaderKey(c.Name), c.Value)
- if len(c.Path) > 0 {
- fmt.Fprintf(&b, "; Path=%s", url.QueryEscape(c.Path))
- }
- if len(c.Domain) > 0 {
- fmt.Fprintf(&b, "; Domain=%s", url.QueryEscape(c.Domain))
- }
- if c.Expires.Unix() > 0 {
- fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123))
- }
- if c.MaxAge >= 0 {
- fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
- }
- if c.HttpOnly {
- fmt.Fprintf(&b, "; HttpOnly")
- }
- if c.Secure {
- fmt.Fprintf(&b, "; Secure")
- }
- lines = append(lines, "Set-Cookie: "+b.String()+"\r\n")
- }
- sort.Strings(lines)
- for _, l := range lines {
- if _, err := io.WriteString(w, l); err != nil {
- return err
- }
- }
- return nil
-}
-
-// writeCookies writes the wire representation of the cookies
-// to w. Each cookie is written on a separate "Cookie: " line.
-// This choice is made because HTTP parsers tend to have a limit on
-// line-length, so it seems safer to place cookies on separate lines.
-func writeCookies(w io.Writer, kk []*http.Cookie) error {
- lines := make([]string, 0, len(kk))
- var b bytes.Buffer
- for _, c := range kk {
- b.Reset()
- n := c.Name
- // TODO(petar): c.Value (below) should be unquoted if it is recognized as quoted
- fmt.Fprintf(&b, "%s=%s", http.CanonicalHeaderKey(n), c.Value)
- if len(c.Path) > 0 {
- fmt.Fprintf(&b, "; $Path=%s", url.QueryEscape(c.Path))
- }
- if len(c.Domain) > 0 {
- fmt.Fprintf(&b, "; $Domain=%s", url.QueryEscape(c.Domain))
- }
- if c.HttpOnly {
- fmt.Fprintf(&b, "; $HttpOnly")
- }
- lines = append(lines, "Cookie: "+b.String()+"\r\n")
- }
- sort.Strings(lines)
- for _, l := range lines {
- if _, err := io.WriteString(w, l); err != nil {
- return err
- }
- }
- return nil
-}
-
-// readCookies parses all "Cookie" values from
-// the header h, removes the successfully parsed values from the
-// "Cookie" key in h and returns the parsed Cookies.
-func readCookies(h http.Header) []*http.Cookie {
- cookies := []*http.Cookie{}
- lines, ok := h["Cookie"]
- if !ok {
- return cookies
- }
- unparsedLines := []string{}
- for _, line := range lines {
- parts := strings.Split(strings.TrimSpace(line), ";")
- if len(parts) == 1 && parts[0] == "" {
- continue
- }
- // Per-line attributes
- var lineCookies = make(map[string]string)
- var path string
- var domain string
- var httponly bool
- for i := 0; i < len(parts); i++ {
- parts[i] = strings.TrimSpace(parts[i])
- if len(parts[i]) == 0 {
- continue
- }
- attr, val := parts[i], ""
- var err error
- if j := strings.Index(attr, "="); j >= 0 {
- attr, val = attr[:j], attr[j+1:]
- val, err = url.QueryUnescape(val)
- if err != nil {
- continue
- }
- }
- switch strings.ToLower(attr) {
- case "$httponly":
- httponly = true
- case "$domain":
- domain = val
- // TODO: Add domain parsing
- case "$path":
- path = val
- // TODO: Add path parsing
- default:
- lineCookies[attr] = val
- }
- }
- if len(lineCookies) == 0 {
- unparsedLines = append(unparsedLines, line)
- }
- for n, v := range lineCookies {
- cookies = append(cookies, &http.Cookie{
- Name: n,
- Value: v,
- Path: path,
- Domain: domain,
- HttpOnly: httponly,
- MaxAge: -1,
- Raw: line,
- })
- }
- }
- if len(unparsedLines) > 0 {
- h["Cookie"] = unparsedLines
- }
-
- return cookies
-}
View
73 fcgi.go
@@ -4,10 +4,13 @@ import (
"bufio"
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"io"
+ "io/ioutil"
"net"
"net/http"
+ "net/http/cgi"
"strings"
)
@@ -90,6 +93,7 @@ func (er fcgiEndReq) bytes() []byte {
type fcgiConn struct {
requestId uint16
+ req *http.Request
fd io.ReadWriteCloser
headers http.Header
wroteHeaders bool
@@ -129,51 +133,43 @@ func (conn *fcgiConn) fcgiWrite(data []byte) (err error) {
}
func (conn *fcgiConn) Write(data []byte) (n int, err error) {
- var buf bytes.Buffer
if !conn.wroteHeaders {
- conn.wroteHeaders = true
- for k, v := range conn.headers {
- for _, i := range v {
- buf.WriteString(k + ": " + i + "\r\n")
- }
- }
- buf.WriteString("\r\n")
- conn.fcgiWrite(buf.Bytes())
+ conn.WriteHeader(200)
}
+ if conn.req.Method == "HEAD" {
+ return 0, errors.New("Body Not Allowed")
+ }
err = conn.fcgiWrite(data)
-
if err != nil {
return 0, err
}
return len(data), nil
}
-func (conn *fcgiConn) StartResponse(status int) {
- var buf bytes.Buffer
- text := statusText[status]
- fmt.Fprintf(&buf, "HTTP/1.1 %d %s\r\n", status, text)
- conn.fcgiWrite(buf.Bytes())
-}
+func (conn *fcgiConn) WriteHeader(status int) {
+ if !conn.wroteHeaders {
+ conn.wroteHeaders = true
-func (conn *fcgiConn) SetHeader(hdr string, val string, unique bool) {
- if _, contains := conn.headers[hdr]; !contains {
- conn.headers[hdr] = []string{val}
- return
- }
+ var buf bytes.Buffer
+ text := statusText[status]
+ fmt.Fprintf(&buf, "HTTP/1.1 %d %s\r\n", status, text)
- if unique {
- //just overwrite the first value
- conn.headers[hdr][0] = val
- } else {
- newHeaders := make([]string, len(conn.headers)+1)
- copy(newHeaders, conn.headers[hdr])
- newHeaders[len(newHeaders)-1] = val
- conn.headers[hdr] = newHeaders
+ for k, v := range conn.headers {
+ for _, i := range v {
+ buf.WriteString(k + ": " + i + "\r\n")
+ }
+ }
+ buf.WriteString("\r\n")
+ conn.fcgiWrite(buf.Bytes())
}
}
+func (conn *fcgiConn) Header() http.Header {
+ return conn.headers
+}
+
func (conn *fcgiConn) complete() {
content := fcgiEndReq{appStatus: 200, protocolStatus: fcgiRequestComplete}.bytes()
l := len(content)
@@ -213,7 +209,7 @@ func readFcgiParamSize(data []byte, index int) (int, int) {
}
//read the fcgi parameters contained in data, and store them in storage
-func readFcgiParams(data []byte, storage http.Header) {
+func readFcgiParams(data []byte, storage map[string]string) {
for idx := 0; len(data) > idx; {
keySize, shift := readFcgiParamSize(data, idx)
idx += shift
@@ -223,16 +219,16 @@ func readFcgiParams(data []byte, storage http.Header) {
idx += keySize
val := data[idx : idx+valSize]
idx += valSize
- storage.Set(string(key), string(val))
+ storage[string(key)] = string(val)
}
}
func (s *Server) handleFcgiConnection(fd io.ReadWriteCloser) {
br := bufio.NewReader(fd)
- var req *Request
+ var req *http.Request
var fc *fcgiConn
var body bytes.Buffer
- headers := make(http.Header)
+ headers := map[string]string{}
for {
var h fcgiHeader
@@ -247,8 +243,8 @@ func (s *Server) handleFcgiConnection(fd io.ReadWriteCloser) {
content := make([]byte, h.ContentLength)
_, err = io.ReadFull(br, content)
if err != nil {
- s.Logger.Println("FCGI Error", err.Error())
- break
+ s.Logger.Println("FCGI Error", err.Error())
+ break
}
//read padding
@@ -263,7 +259,7 @@ func (s *Server) handleFcgiConnection(fd io.ReadWriteCloser) {
switch h.Type {
case fcgiBeginRequest:
- fc = &fcgiConn{h.RequestId, fd, make(map[string][]string), false}
+ fc = &fcgiConn{h.RequestId, req, fd, make(map[string][]string), false}
case fcgiParams:
if h.ContentLength > 0 {
@@ -273,7 +269,10 @@ func (s *Server) handleFcgiConnection(fd io.ReadWriteCloser) {
if h.ContentLength > 0 {
body.Write(content)
} else if h.ContentLength == 0 {
- req = newRequestCgi(headers, &body)
+
+ req, _ = cgi.RequestFromMap(headers)
+ req.Body = ioutil.NopCloser(&body)
+ fc.req = req
s.routeHandler(req, fc)
//we close the connection after processing
//TODO: is there a way to keep it open for future requests?
Oops, something went wrong.

0 comments on commit a6d87d9

Please sign in to comment.