Skip to content

Commit

Permalink
gofmt -tabs=false -tabwidth=2
Browse files Browse the repository at this point in the history
I am not very happy with the formatting gofmt provides, but as there's
a cult of gophers who seem to love this tool, it makes sense to try
to avoid "why not gofmt!?" discussions and not distract existing Go
users with a differing code style.

Next step is obviously to write logstash-forwarder in !Go just to
avoid formatting wars, right? ;)
  • Loading branch information
jordansissel committed Feb 4, 2014
1 parent d2ba889 commit ae65de4
Show file tree
Hide file tree
Showing 18 changed files with 149 additions and 122 deletions.
28 changes: 14 additions & 14 deletions config.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,27 @@ package main

import (
"encoding/json"
"os"
"log"
"os"
"time"
)

type Config struct {
Network NetworkConfig `json:network`
Files []FileConfig `json:files`
Files []FileConfig `json:files`
}

type NetworkConfig struct {
Servers []string `json:servers`
SSLCertificate string `json:"ssl certificate"`
SSLKey string `json:"ssl key"`
SSLCA string `json:"ssl ca"`
Timeout int64 `json:timeout`
timeout time.Duration
}
Servers []string `json:servers`
SSLCertificate string `json:"ssl certificate"`
SSLKey string `json:"ssl key"`
SSLCA string `json:"ssl ca"`
Timeout int64 `json:timeout`
timeout time.Duration
}

type FileConfig struct {
Paths []string `json:paths`
Paths []string `json:paths`
Fields map[string]string `json:fields`
//DeadTime time.Duration `json:"dead time"`
}
Expand All @@ -37,7 +37,7 @@ func LoadConfig(path string) (config Config, err error) {
fi, _ := config_file.Stat()
if fi.Size() > (10 << 20) {
log.Printf("Config file too large? Aborting, just in case. '%s' is %d bytes\n",
path, fi)
path, fi)
return
}

Expand All @@ -58,9 +58,9 @@ func LoadConfig(path string) (config Config, err error) {
config.Network.timeout = time.Duration(config.Network.Timeout) * time.Second

//for _, fileconfig := range config.Files {
//if fileconfig.DeadTime == 0 {
//fileconfig.DeadTime = 24 * time.Hour
//}
//if fileconfig.DeadTime == 0 {
//fileconfig.DeadTime = 24 * time.Hour
//}
//}

return
Expand Down
20 changes: 14 additions & 6 deletions config_test.go
Original file line number Diff line number Diff line change
@@ -1,21 +1,29 @@
package main

import (
"testing"
"encoding/json"
"testing"
)

type FileConfig struct {
Paths []string "json:paths"
Paths []string "json:paths"
Fields map[string]string "json:fields"
}

func TestJSONLoading(t *testing.T) {
var f File
err := json.Unmarshal([]byte("{ \"paths\": [ \"/var/log/fail2ban.log\" ], \"fields\": { \"type\": \"fail2ban\" } }"), &f)

if err != nil { t.Fatalf("json.Unmarshal failed") }
if len(f.Paths) != 1 { t.FailNow() }
if f.Paths[0] != "/var/log/fail2ban.log" { t.FailNow() }
if f.Fields["type"] != "fail2ban" { t.FailNow() }
if err != nil {
t.Fatalf("json.Unmarshal failed")
}
if len(f.Paths) != 1 {
t.FailNow()
}
if f.Paths[0] != "/var/log/fail2ban.log" {
t.FailNow()
}
if f.Fields["type"] != "fail2ban" {
t.FailNow()
}
}
1 change: 0 additions & 1 deletion emitter.go
Original file line number Diff line number Diff line change
@@ -1,2 +1 @@
package main

6 changes: 3 additions & 3 deletions event.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@ import "os"

type FileEvent struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Line uint64 `json:"line,omitempty"`
Text *string `json:"text,omitempty"`
Offset int64 `json:"offset,omitempty"`
Line uint64 `json:"line,omitempty"`
Text *string `json:"text,omitempty"`
Fields *map[string]string

fileinfo *os.FileInfo
Expand Down
1 change: 0 additions & 1 deletion filecompare.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,4 +32,3 @@ func is_file_renamed(file string, info os.FileInfo, fileinfo map[string]os.FileI
}
return false
}

6 changes: 3 additions & 3 deletions filestate_darwin.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ package main

type FileState struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device int32 `json:"device,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device int32 `json:"device,omitempty"`
}
6 changes: 3 additions & 3 deletions filestate_linux.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ package main

type FileState struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
}
6 changes: 3 additions & 3 deletions filestate_windows.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ package main

type FileState struct {
Source *string `json:"source,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
Offset int64 `json:"offset,omitempty"`
Inode uint64 `json:"inode,omitempty"`
Device uint64 `json:"device,omitempty"`
}
22 changes: 11 additions & 11 deletions harvester.go
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
package main

import (
"os" // for File and friends
"log"
"bufio"
"bytes"
"io"
"bufio"
"log"
"os" // for File and friends
"time"
)

type Harvester struct {
Path string /* the file path to harvest */
Path string /* the file path to harvest */
Fields map[string]string
Offset int64

Expand Down Expand Up @@ -71,14 +71,14 @@ func (h *Harvester) Harvest(output chan *FileEvent) {

line++
event := &FileEvent{
Source: &h.Path,
Offset: offset,
Line: line,
Text: text,
Fields: &h.Fields,
Source: &h.Path,
Offset: offset,
Line: line,
Text: text,
Fields: &h.Fields,
fileinfo: &info,
}
offset += int64(len(*event.Text)) + 1 // +1 because of the line terminator
offset += int64(len(*event.Text)) + 1 // +1 because of the line terminator

output <- event // ship the new event downstream
} /* forever */
Expand All @@ -89,7 +89,7 @@ func (h *Harvester) open() *os.File {
if h.Path == "-" {
h.file = os.Stdin
return h.file
}
}

for {
var err error
Expand Down
10 changes: 5 additions & 5 deletions logstash-forwarder.go
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
package main

import (
"flag"
"log"
"os"
"time"
"flag"
"runtime/pprof"
"time"
)

var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
var spool_size = flag.Uint64("spool-size", 1024, "Maximum number of events to spool before a flush is forced.")
var idle_timeout = flag.Duration("idle-flush-time", 5 * time.Second, "Maximum time to wait for a full spool before flushing anyway")
var idle_timeout = flag.Duration("idle-flush-time", 5*time.Second, "Maximum time to wait for a full spool before flushing anyway")
var config_file = flag.String("config", "", "The config file to load")
var use_syslog = flag.Bool("log-to-syslog", false, "Log to syslog instead of stdout")
var from_beginning = flag.Bool("from-beginning", false, "Read new files from the beginning, instead of the end")
Expand All @@ -21,7 +21,7 @@ func main() {
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
log.Fatal(err)
log.Fatal(err)
}
pprof.StartCPUProfile(f)
go func() {
Expand Down Expand Up @@ -52,7 +52,7 @@ func main() {
// - registrar: records positions of files read
// Finally, prospector uses the registrar information, on restart, to
// determine where in each file to resume a harvester.

log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
if *use_syslog {
configureSyslog()
Expand Down
20 changes: 11 additions & 9 deletions prospector.go
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
package main

import (
"time"
"path/filepath"
"encoding/json"
"os"
"log"
"os"
"path/filepath"
"time"
)

func Prospect(fileconfig FileConfig, output chan *FileEvent) {
Expand Down Expand Up @@ -49,7 +49,9 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
// if the file is the same inode/device as we last saw,
// start a harvester on it at the last known position
info, err := os.Stat(path)
if err != nil { continue }
if err != nil {
continue
}

if is_file_same(path, info, state) {
// same file, seek to last known position
Expand All @@ -58,7 +60,7 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
for _, pathglob := range fileconfig.Paths {
match, _ := filepath.Match(pathglob, path)
if match {
harvester := Harvester{Path: path, Fields: fileconfig.Fields, Offset: state.Offset }
harvester := Harvester{Path: path, Fields: fileconfig.Fields, Offset: state.Offset}
go harvester.Harvest(output)
break
}
Expand All @@ -68,9 +70,9 @@ func resume_tracking(fileconfig FileConfig, fileinfo map[string]os.FileInfo, out
}
}

func prospector_scan(path string, fields map[string]string,
fileinfo map[string]os.FileInfo,
output chan *FileEvent) {
func prospector_scan(path string, fields map[string]string,
fileinfo map[string]os.FileInfo,
output chan *FileEvent) {
//log.Printf("Prospecting %s\n", path)

// Evaluate the path as a wildcards/shell glob
Expand Down Expand Up @@ -109,7 +111,7 @@ func prospector_scan(path string, fields map[string]string,
// Conditions for starting a new harvester:
// - file path hasn't been seen before
// - the file's inode or device changed
if !is_known {
if !is_known {
// TODO(sissel): Skip files with modification dates older than N
// TODO(sissel): Make the 'ignore if older than N' tunable
if time.Since(info.ModTime()) > 24*time.Hour {
Expand Down
Loading

0 comments on commit ae65de4

Please sign in to comment.