Permalink
Browse files

gofmt -tabs=false -tabwidth=2

I am not very happy with the formatting gofmt provides, but as there's
the cult of gophers who seem to love this tool, it makes sense to try
to avoid "why not gofmt!?" discussions and not distract existing go
users with a differing code style.

Next step is obviously to write logstash-forwarder in !Go just to
avoid formatting wars, right? ;)
  • Loading branch information...
jordansissel committed Feb 4, 2014
1 parent d2ba889 commit ae65de4805a73208f7081996be1cb9c43131496d
Showing with 149 additions and 122 deletions.
  1. +14 −14 config.go
  2. +14 −6 config_test.go
  3. +0 −1 emitter.go
  4. +3 −3 event.go
  5. +0 −1 filecompare.go
  6. +3 −3 filestate_darwin.go
  7. +3 −3 filestate_linux.go
  8. +3 −3 filestate_windows.go
  9. +11 −11 harvester.go
  10. +5 −5 logstash-forwarder.go
  11. +11 −9 prospector.go
  12. +43 −25 publisher1.go
  13. +1 −2 registrar.go
  14. +2 −1 registrar_other.go
  15. +1 −1 registrar_windows.go
  16. +32 −32 spooler.go
  17. +2 −1 syslog.go
  18. +1 −1 syslog_windows.go
View
@@ -2,27 +2,27 @@ package main
import (
"encoding/json"
"os"
"log"
"os"
"time"
)
type Config struct {
Network NetworkConfig `json:network`
Files []FileConfig `json:files`
Files []FileConfig `json:files`
}
type NetworkConfig struct {
Servers []string `json:servers`
SSLCertificate string `json:"ssl certificate"`
SSLKey string `json:"ssl key"`
SSLCA string `json:"ssl ca"`
Timeout int64 `json:timeout`
timeout time.Duration
}
Servers []string `json:servers`
SSLCertificate string `json:"ssl certificate"`
SSLKey string `json:"ssl key"`
SSLCA string `json:"ssl ca"`
Timeout int64 `json:timeout`
timeout time.Duration
}
type FileConfig struct {
Paths []string `json:paths`
Paths []string `json:paths`
Fields map[string]string `json:fields`
//DeadTime time.Duration `json:"dead time"`
}
@@ -37,7 +37,7 @@ func LoadConfig(path string) (config Config, err error) {
fi, _ := config_file.Stat()
if fi.Size() > (10 << 20) {
log.Printf("Config file too large? Aborting, just in case. '%s' is %d bytes\n",
path, fi)
path, fi)
return
}
@@ -58,9 +58,9 @@ func LoadConfig(path string) (config Config, err error) {
config.Network.timeout = time.Duration(config.Network.Timeout) * time.Second
//for _, fileconfig := range config.Files {
//if fileconfig.DeadTime == 0 {
//fileconfig.DeadTime = 24 * time.Hour
//}
//if fileconfig.DeadTime == 0 {
//fileconfig.DeadTime = 24 * time.Hour
//}
//}
return
View
@@ -1,21 +1,29 @@
package main
import (
"testing"
"encoding/json"
"testing"
)
type FileConfig struct {
Paths []string "json:paths"
Paths []string "json:paths"
Fields map[string]string "json:fields"
}
func TestJSONLoading(t *testing.T) {
var f File
err := json.Unmarshal([]byte("{ \"paths\": [ \"/var/log/fail2ban.log\" ], \"fields\": { \"type\": \"fail2ban\" } }"), &f)
if err != nil { t.Fatalf("json.Unmarshal failed") }
if len(f.Paths) != 1 { t.FailNow() }
if f.Paths[0] != "/var/log/fail2ban.log" { t.FailNow() }
if f.Fields["type"] != "fail2ban" { t.FailNow() }
if err != nil {
t.Fatalf("json.Unmarshal failed")
}
if len(f.Paths) != 1 {
t.FailNow()
}
if f.Paths[0] != "/var/log/fail2ban.log" {
t.FailNow()
}
if f.Fields["type"] != "fail2ban" {
t.FailNow()
}
}
View
@@ -1,2 +1 @@
package main
View
@@ -4,9 +4,9 @@ import "os"
type FileEvent struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Line uint64 `json:"line,omitempty"`
Text *string `json:"text,omitempty"`
Offset int64 `json:"offset,omitempty"`
Line uint64 `json:"line,omitempty"`
Text *string `json:"text,omitempty"`
Fields *map[string]string
fileinfo *os.FileInfo
View
@@ -32,4 +32,3 @@ func is_file_renamed(file string, info os.FileInfo, fileinfo map[string]os.FileI
}
return false
}
View
@@ -2,7 +2,7 @@ package main
type FileState struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device int32 `json:"device,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device int32 `json:"device,omitempty"`
}
View
@@ -2,7 +2,7 @@ package main
type FileState struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
}
View
@@ -2,7 +2,7 @@ package main
type FileState struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
}
View
@@ -1,16 +1,16 @@
package main
import (
"os" // for File and friends
"log"
"bufio"
"bytes"
"io"
"bufio"
"log"
"os" // for File and friends
"time"
)
type Harvester struct {
Path string /* the file path to harvest */
Path string /* the file path to harvest */
Fields map[string]string
Offset int64
@@ -71,14 +71,14 @@ func (h *Harvester) Harvest(output chan *FileEvent) {
line++
event := &FileEvent{
Source: &h.Path,
Offset: offset,
Line: line,
Text: text,
Fields: &h.Fields,
Source: &h.Path,
Offset: offset,
Line: line,
Text: text,
Fields: &h.Fields,
fileinfo: &info,
}
offset += int64(len(*event.Text)) + 1 // +1 because of the line terminator
offset += int64(len(*event.Text)) + 1 // +1 because of the line terminator
output <- event // ship the new event downstream
} /* forever */
@@ -89,7 +89,7 @@ func (h *Harvester) open() *os.File {
if h.Path == "-" {
h.file = os.Stdin
return h.file
}
}
for {
var err error
View
@@ -1,16 +1,16 @@
package main
import (
"flag"
"log"
"os"
"time"
"flag"
"runtime/pprof"
"time"
)
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
var spool_size = flag.Uint64("spool-size", 1024, "Maximum number of events to spool before a flush is forced.")
var idle_timeout = flag.Duration("idle-flush-time", 5 * time.Second, "Maximum time to wait for a full spool before flushing anyway")
var idle_timeout = flag.Duration("idle-flush-time", 5*time.Second, "Maximum time to wait for a full spool before flushing anyway")
var config_file = flag.String("config", "", "The config file to load")
var use_syslog = flag.Bool("log-to-syslog", false, "Log to syslog instead of stdout")
var from_beginning = flag.Bool("from-beginning", false, "Read new files from the beginning, instead of the end")
@@ -21,7 +21,7 @@ func main() {
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
log.Fatal(err)
}
pprof.StartCPUProfile(f)
go func() {
@@ -52,7 +52,7 @@ func main() {
// - registrar: records positions of files read
// Finally, prospector uses the registrar information, on restart, to
// determine where in each file to resume a harvester.
log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
if *use_syslog {
configureSyslog()
View
@@ -1,11 +1,11 @@
package main
import (
"time"
"path/filepath"
"encoding/json"
"os"
"log"
"os"
"path/filepath"
"time"
)
func Prospect(fileconfig FileConfig, output chan *FileEvent) {
@@ -49,7 +49,9 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
// if the file is the same inode/device as we last saw,
// start a harvester on it at the last known position
info, err := os.Stat(path)
if err != nil { continue }
if err != nil {
continue
}
if is_file_same(path, info, state) {
// same file, seek to last known position
@@ -58,7 +60,7 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
for _, pathglob := range fileconfig.Paths {
match, _ := filepath.Match(pathglob, path)
if match {
harvester := Harvester{Path: path, Fields: fileconfig.Fields, Offset: state.Offset }
harvester := Harvester{Path: path, Fields: fileconfig.Fields, Offset: state.Offset}
go harvester.Harvest(output)
break
}
@@ -68,9 +70,9 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
}
}
func prospector_scan(path string, fields map[string]string,
fileinfo map[string]os.FileInfo,
output chan *FileEvent) {
func prospector_scan(path string, fields map[string]string,
fileinfo map[string]os.FileInfo,
output chan *FileEvent) {
//log.Printf("Prospecting %s\n", path)
// Evaluate the path as a wildcards/shell glob
@@ -109,7 +111,7 @@ func prospector_scan(path string, fields map[string]string,
// Conditions for starting a new harvester:
// - file path hasn't been seen before
// - the file's inode or device changed
if !is_known {
if !is_known {
// TODO(sissel): Skip files with modification dates older than N
// TODO(sissel): Make the 'ignore if older than N' tunable
if time.Since(info.ModTime()) > 24*time.Hour {
Oops, something went wrong.

0 comments on commit ae65de4

Please sign in to comment.