Skip to content

Commit

Permalink
Merge pull request #17 from eklitzke/tail
Browse files Browse the repository at this point in the history
When following log files, tail them rather than truncate them.
  • Loading branch information
BartVerc committed Feb 15, 2019
2 parents 623a840 + a4e09a2 commit 1b1c5f3
Show file tree
Hide file tree
Showing 2 changed files with 34 additions and 45 deletions.
13 changes: 3 additions & 10 deletions README.md
Expand Up @@ -15,16 +15,9 @@ command line flags.

## Events from log file

The log file is truncated when
processed, so that the next iteration doesn't interpret the same lines
twice. It makes sense to configure your syslogger to multiplex log
entries to a second file:

```
mail.* -/var/log/postfix_exporter_input.log
```

The path to the log file is specified with the `-postfix.logfile_path` flag.
The log file is followed (tailed) as it grows, so the same line is never
processed twice, and rotating the log file while the exporter is running is
handled transparently. The path to the log file is specified with the
`-postfix.logfile_path` flag.

## Events from systemd

Expand Down
66 changes: 31 additions & 35 deletions postfix_exporter.go
Expand Up @@ -29,6 +29,7 @@ import (
"strings"
"time"

"github.com/hpcloud/tail"
"github.com/prometheus/client_golang/prometheus"
)

Expand All @@ -42,9 +43,9 @@ var (
// PostfixExporter holds the state that should be preserved by the
// Postfix Prometheus metrics exporter across scrapes.
type PostfixExporter struct {
showqPath string
logfilePath string
journal *Journal
showqPath string
journal *Journal
tailer *tail.Tail

// Metrics that should persist after refreshes, based on logs.
cleanupProcesses prometheus.Counter
Expand Down Expand Up @@ -424,41 +425,36 @@ func (e *PostfixExporter) CollectFromLogline(line string) {
}
}

// CollectLogfileFromReader collects metrics from a Postfix logfile,
// using a reader object. Every line read from the reader is handed to
// CollectFromLogline; the scanner's terminal error (nil on clean EOF)
// is returned.
func (e *PostfixExporter) CollectLogfileFromReader(file io.Reader) error {
	scanner := bufio.NewScanner(file)
	// Note: ScanLines is bufio.Scanner's default split function, so an
	// explicit scanner.Split(bufio.ScanLines) call is redundant.
	for scanner.Scan() {
		e.CollectFromLogline(scanner.Text())
	}
	return scanner.Err()
}

// CollectLogfileFromFile Collects entries from a Postfix log file and
// truncates it. Truncation is performed to ensure that the next
// iteration doesn't end up processing the same log entry twice.
func (e *PostfixExporter) CollectLogfileFromFile(path string) error {
fd, err := os.OpenFile(path, os.O_RDWR, 0)
if err != nil {
return err
}
defer fd.Close()
err = e.CollectLogfileFromReader(fd)
if err != nil {
return err
// CollectLogfileFromFile drains all log lines the tailer has buffered so
// far and feeds each one through CollectFromLogline. It returns as soon as
// no line is immediately available, so a metrics scrape never blocks
// waiting for new log output.
func (e *PostfixExporter) CollectLogfileFromFile() error {
	for {
		select {
		case line, ok := <-e.tailer.Lines:
			// The tailer closes the channel when it stops; a receive on
			// a closed channel yields a nil *tail.Line, so guard before
			// dereferencing line.Text.
			if !ok {
				return nil
			}
			// Each tail.Line can carry an error from the tailer
			// (e.g. a read failure); surface it to the caller.
			if line.Err != nil {
				return line.Err
			}
			e.CollectFromLogline(line.Text)
		default:
			// No buffered lines left — don't block the scrape.
			return nil
		}
	}
}

// NewPostfixExporter creates a new Postfix exporter instance.
func NewPostfixExporter(showqPath string, logfilePath string, journal *Journal) (*PostfixExporter, error) {
var tailer *tail.Tail
if logfilePath != "" {
var err error
tailer, err = tail.TailFile(logfilePath, tail.Config{
ReOpen: true, // reopen the file if it's rotated
MustExist: true, // fail immediately if the file is missing or has incorrect permissions
Follow: true, // run in follow mode
})
if err != nil {
return nil, err
}
}
return &PostfixExporter{
showqPath: showqPath,
logfilePath: logfilePath,
journal: journal,
showqPath: showqPath,
tailer: tailer,
journal: journal,

cleanupProcesses: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "postfix",
Expand Down Expand Up @@ -623,8 +619,8 @@ func (e *PostfixExporter) Collect(ch chan<- prometheus.Metric) {
err = e.CollectLogfileFromJournal()
src = e.journal.Path
} else {
err = e.CollectLogfileFromFile(e.logfilePath)
src = e.logfilePath
err = e.CollectLogfileFromFile()
src = e.tailer.Filename
}
if err == nil {
ch <- prometheus.MustNewConstMetric(
Expand Down Expand Up @@ -691,7 +687,7 @@ func main() {
journal,
)
if err != nil {
panic(err)
log.Fatalf("Failed to create PostfixExporter: %s", err)
}
prometheus.MustRegister(exporter)

Expand Down

0 comments on commit 1b1c5f3

Please sign in to comment.