Commit bf7b2548 authored by Fernando Álvarez, committed by Matt Holt

log, errors: Introduce `rotate_compress` option (#1731)

* vendor: update Lumberjack dep

* httpserver/roller: introduce rotate_compress directive

This directive enables gzip compression of rotated log files, as provided by [Lumberjack](https://github.com/natefinch/lumberjack/pull/43).

The `rotate_compress` directive can be set to `true` or `false`; it defaults to `false` (see the sketch below).

* httpserver/roller: remove need to set bool with rotate_compress option
parent 3bc92540
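For orientation, a minimal sketch of what the new option amounts to on the Lumberjack side: once `rotate_compress` is parsed, the roller hands the standard `log` package a `lumberjack.Logger` whose `Compress` field is true, so rotated backups are gzipped and carry a `.gz` suffix. The file name here is illustrative; the numeric values mirror the roller defaults in the diff below.

```go
package main

import (
    "log"

    "gopkg.in/natefinch/lumberjack.v2"
)

func main() {
    // Roughly what Caddy's log roller builds once rotate_compress is present:
    // a lumberjack writer whose rotated backups are compressed with gzip.
    log.SetOutput(&lumberjack.Logger{
        Filename:   "access.log", // illustrative path
        MaxSize:    100,          // megabytes before rotation (defaultRotateSize)
        MaxAge:     14,           // days to keep backups (defaultRotateAge)
        MaxBackups: 10,           // number of backups to keep (defaultRotateKeep)
        LocalTime:  true,
        Compress:   true, // set by the new rotate_compress directive
    })
    log.Println("writes roll and compress according to the settings above")
}
```

Leaving `Compress` at its zero value keeps the previous behaviour, which is why the directive defaults to off.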
@@ -85,13 +85,14 @@ func TestErrorsParse(t *testing.T) {
             Roller: httpserver.DefaultLogRoller(),
         },
     }},
-    {`errors errors.txt { rotate_size 2 rotate_age 10 rotate_keep 3 }`, false, ErrorHandler{
+    {`errors errors.txt { rotate_size 2 rotate_age 10 rotate_keep 3 rotate_compress }`, false, ErrorHandler{
         ErrorPages: map[int]string{},
         Log: &httpserver.Logger{
             Output: "errors.txt", Roller: &httpserver.LogRoller{
                 MaxSize:    2,
                 MaxAge:     10,
                 MaxBackups: 3,
+                Compress:   true,
                 LocalTime:  true,
             },
         },
@@ -113,6 +114,7 @@ func TestErrorsParse(t *testing.T) {
                 MaxSize:    3,
                 MaxAge:     11,
                 MaxBackups: 5,
+                Compress:   false,
                 LocalTime:  true,
             },
         },
@@ -14,6 +14,7 @@ type LogRoller struct {
     MaxSize    int
     MaxAge     int
     MaxBackups int
+    Compress   bool
     LocalTime  bool
 }
@@ -37,6 +38,7 @@ func (l LogRoller) GetLogWriter() io.Writer {
         MaxSize:    l.MaxSize,
         MaxAge:     l.MaxAge,
         MaxBackups: l.MaxBackups,
+        Compress:   l.Compress,
         LocalTime:  l.LocalTime,
     }
     lumberjacks[absPath] = lj
@@ -48,7 +50,8 @@ func (l LogRoller) GetLogWriter() io.Writer {
 func IsLogRollerSubdirective(subdir string) bool {
     return subdir == directiveRotateSize ||
         subdir == directiveRotateAge ||
-        subdir == directiveRotateKeep
+        subdir == directiveRotateKeep ||
+        subdir == directiveRotateCompress
 }
 
 // ParseRoller parses roller contents out of c.
@@ -59,7 +62,7 @@ func ParseRoller(l *LogRoller, what string, where string) error {
     var value int
     var err error
     value, err = strconv.Atoi(where)
-    if err != nil {
+    if what != directiveRotateCompress && err != nil {
         return err
     }
     switch what {
@@ -69,6 +72,8 @@ func ParseRoller(l *LogRoller, what string, where string) error {
         l.MaxAge = value
     case directiveRotateKeep:
         l.MaxBackups = value
+    case directiveRotateCompress:
+        l.Compress = true
     }
     return nil
 }
@@ -79,6 +84,7 @@ func DefaultLogRoller() *LogRoller {
         MaxSize:    defaultRotateSize,
         MaxAge:     defaultRotateAge,
         MaxBackups: defaultRotateKeep,
+        Compress:   false,
         LocalTime:  true,
     }
 }
@@ -89,10 +95,12 @@ const (
     // defaultRotateAge is 14 days.
     defaultRotateAge = 14
     // defaultRotateKeep is 10 files.
     defaultRotateKeep = 10
-    directiveRotateSize = "rotate_size"
-    directiveRotateAge  = "rotate_age"
-    directiveRotateKeep = "rotate_keep"
+
+    directiveRotateSize     = "rotate_size"
+    directiveRotateAge      = "rotate_age"
+    directiveRotateKeep     = "rotate_keep"
+    directiveRotateCompress = "rotate_compress"
 )
 
 // lumberjacks maps log filenames to the logger
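Taken together, the roller changes above let a `log` or `errors` block name `rotate_compress` without an argument: `ParseRoller` only insists on a numeric value for the other `rotate_*` subdirectives and simply flips `Compress` to true when it sees the new one. Below is a hedged sketch of how these helpers compose outside the Caddyfile machinery; the import path (Caddy's repository location at the time) and the hand-rolled subdirective list are assumptions for illustration only.

```go
package main

import (
    "fmt"

    "github.com/mholt/caddy/caddyhttp/httpserver"
)

func main() {
    roller := httpserver.DefaultLogRoller()

    // Subdirective/value pairs as they might appear inside a log or errors
    // block. rotate_compress carries no value, so an empty string is passed;
    // ParseRoller tolerates the failed Atoi only for that directive.
    pairs := [][2]string{
        {"rotate_size", "2"},
        {"rotate_age", "10"},
        {"rotate_keep", "3"},
        {"rotate_compress", ""},
    }
    for _, p := range pairs {
        if !httpserver.IsLogRollerSubdirective(p[0]) {
            continue
        }
        if err := httpserver.ParseRoller(roller, p[0], p[1]); err != nil {
            fmt.Println("parse error:", err)
        }
    }
    fmt.Println("compress enabled:", roller.Compress) // true
}
```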
@@ -203,6 +203,7 @@ func TestLogParse(t *testing.T) {
                 MaxSize:    2,
                 MaxAge:     10,
                 MaxBackups: 3,
+                Compress:   false,
                 LocalTime:  true,
             }},
             Format: DefaultLogFormat,
@@ -22,6 +22,8 @@
 package lumberjack
 
 import (
+    "compress/gzip"
+    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -35,6 +37,7 @@ import (
 const (
     backupTimeFormat = "2006-01-02T15-04-05.000"
+    compressSuffix   = ".gz"
     defaultMaxSize   = 100
 )
@@ -100,9 +103,16 @@ type Logger struct {
     // time.
     LocalTime bool `json:"localtime" yaml:"localtime"`
 
+    // Compress determines if the rotated log files should be compressed
+    // using gzip.
+    Compress bool `json:"compress" yaml:"compress"`
+
     size int64
     file *os.File
     mu   sync.Mutex
+
+    millCh    chan bool
+    startMill sync.Once
 }
 
 var (
@@ -171,8 +181,8 @@ func (l *Logger) close() error {
 // Rotate causes Logger to close the existing log file and immediately create a
 // new one. This is a helper function for applications that want to initiate
 // rotations outside of the normal rotation rules, such as in response to
-// SIGHUP. After rotating, this initiates a cleanup of old log files according
-// to the normal rules.
+// SIGHUP. After rotating, this initiates compression and removal of old log
+// files according to the configuration.
 func (l *Logger) Rotate() error {
     l.mu.Lock()
     defer l.mu.Unlock()
@@ -181,16 +191,16 @@ func (l *Logger) Rotate() error {
 // rotate closes the current file, moves it aside with a timestamp in the name,
 // (if it exists), opens a new file with the original filename, and then runs
-// cleanup.
+// post-rotation processing and removal.
 func (l *Logger) rotate() error {
     if err := l.close(); err != nil {
         return err
     }
     if err := l.openNew(); err != nil {
         return err
     }
-    return l.cleanup()
+    l.mill()
+    return nil
 }
 
 // openNew opens a new log file for writing, moving any old log file out of the
@@ -252,6 +262,8 @@ func backupName(name string, local bool) string {
 // would not put it over MaxSize. If there is no such file or the write would
 // put it over the MaxSize, a new file is created.
 func (l *Logger) openExistingOrNew(writeLen int) error {
+    l.mill()
+
     filename := l.filename()
     info, err := os_Stat(filename)
     if os.IsNotExist(err) {
@@ -285,10 +297,12 @@ func (l *Logger) filename() string {
     return filepath.Join(os.TempDir(), name)
 }
 
-// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as
+// millRunOnce performs compression and removal of stale log files.
+// Log files are compressed if enabled via configuration and old log
+// files are removed, keeping at most l.MaxBackups files, as long as
 // none of them are older than MaxAge.
-func (l *Logger) cleanup() error {
-    if l.MaxBackups == 0 && l.MaxAge == 0 {
+func (l *Logger) millRunOnce() error {
+    if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
         return nil
     }
@@ -297,38 +311,87 @@ func (l *Logger) cleanup() error {
         return err
     }
 
-    var deletes []logInfo
+    var compress, remove []logInfo
 
     if l.MaxBackups > 0 && l.MaxBackups < len(files) {
-        deletes = files[l.MaxBackups:]
-        files = files[:l.MaxBackups]
+        preserved := make(map[string]bool)
+        var remaining []logInfo
+        for _, f := range files {
+            // Only count the uncompressed log file or the
+            // compressed log file, not both.
+            fn := f.Name()
+            if strings.HasSuffix(fn, compressSuffix) {
+                fn = fn[:len(fn)-len(compressSuffix)]
+            }
+            preserved[fn] = true
+
+            if len(preserved) > l.MaxBackups {
+                remove = append(remove, f)
+            } else {
+                remaining = append(remaining, f)
+            }
+        }
+        files = remaining
     }
     if l.MaxAge > 0 {
         diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
         cutoff := currentTime().Add(-1 * diff)
 
+        var remaining []logInfo
         for _, f := range files {
             if f.timestamp.Before(cutoff) {
-                deletes = append(deletes, f)
+                remove = append(remove, f)
+            } else {
+                remaining = append(remaining, f)
             }
         }
+        files = remaining
     }
 
-    if len(deletes) == 0 {
-        return nil
+    if l.Compress {
+        for _, f := range files {
+            if !strings.HasSuffix(f.Name(), compressSuffix) {
+                compress = append(compress, f)
+            }
+        }
     }
 
-    go deleteAll(l.dir(), deletes)
+    for _, f := range remove {
+        errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
+        if err == nil && errRemove != nil {
+            err = errRemove
+        }
+    }
+    for _, f := range compress {
+        fn := filepath.Join(l.dir(), f.Name())
+        errCompress := compressLogFile(fn, fn+compressSuffix)
+        if err == nil && errCompress != nil {
+            err = errCompress
+        }
+    }
 
-    return nil
+    return err
 }
 
-func deleteAll(dir string, files []logInfo) {
-    // remove files on a separate goroutine
-    for _, f := range files {
+// millRun runs in a goroutine to manage post-rotation compression and removal
+// of old log files.
+func (l *Logger) millRun() {
+    for _ = range l.millCh {
         // what am I going to do, log this?
-        _ = os.Remove(filepath.Join(dir, f.Name()))
+        _ = l.millRunOnce()
+    }
+}
+
+// mill performs post-rotation compression and removal of stale log files,
+// starting the mill goroutine if necessary.
+func (l *Logger) mill() {
+    l.startMill.Do(func() {
+        l.millCh = make(chan bool, 1)
+        go l.millRun()
+    })
+    select {
+    case l.millCh <- true:
+    default:
     }
 }
@@ -347,13 +410,13 @@ func (l *Logger) oldLogFiles() ([]logInfo, error) {
         if f.IsDir() {
             continue
         }
-        name := l.timeFromName(f.Name(), prefix, ext)
-        if name == "" {
-            continue
-        }
-        t, err := time.Parse(backupTimeFormat, name)
-        if err == nil {
-            logFiles = append(logFiles, logInfo{t, f})
-        }
+        if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
+            logFiles = append(logFiles, logInfo{t, f})
+            continue
+        }
+        if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
+            logFiles = append(logFiles, logInfo{t, f})
+            continue
+        }
         // error parsing means that the suffix at the end was not generated
         // by lumberjack, and therefore it's not a backup file.
@@ -367,17 +430,15 @@ func (l *Logger) oldLogFiles() ([]logInfo, error) {
 // timeFromName extracts the formatted time from the filename by stripping off
 // the filename's prefix and extension. This prevents someone's filename from
 // confusing time.parse.
-func (l *Logger) timeFromName(filename, prefix, ext string) string {
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
     if !strings.HasPrefix(filename, prefix) {
-        return ""
+        return time.Time{}, errors.New("mismatched prefix")
     }
-    filename = filename[len(prefix):]
     if !strings.HasSuffix(filename, ext) {
-        return ""
+        return time.Time{}, errors.New("mismatched extension")
     }
-    filename = filename[:len(filename)-len(ext)]
-    return filename
+    ts := filename[len(prefix) : len(filename)-len(ext)]
+    return time.Parse(backupTimeFormat, ts)
 }
 
 // max returns the maximum size in bytes of log files before rolling.
@@ -402,6 +463,61 @@ func (l *Logger) prefixAndExt() (prefix, ext string) {
     return prefix, ext
 }
 
+// compressLogFile compresses the given log file, removing the
+// uncompressed log file if successful.
+func compressLogFile(src, dst string) (err error) {
+    f, err := os.Open(src)
+    if err != nil {
+        return fmt.Errorf("failed to open log file: %v", err)
+    }
+    defer f.Close()
+
+    fi, err := os_Stat(src)
+    if err != nil {
+        return fmt.Errorf("failed to stat log file: %v", err)
+    }
+
+    if err := chown(dst, fi); err != nil {
+        return fmt.Errorf("failed to chown compressed log file: %v", err)
+    }
+
+    // If this file already exists, we presume it was created by
+    // a previous attempt to compress the log file.
+    gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
+    if err != nil {
+        return fmt.Errorf("failed to open compressed log file: %v", err)
+    }
+    defer gzf.Close()
+
+    gz := gzip.NewWriter(gzf)
+
+    defer func() {
+        if err != nil {
+            os.Remove(dst)
+            err = fmt.Errorf("failed to compress log file: %v", err)
+        }
+    }()
+
+    if _, err := io.Copy(gz, f); err != nil {
+        return err
+    }
+    if err := gz.Close(); err != nil {
+        return err
+    }
+    if err := gzf.Close(); err != nil {
+        return err
+    }
+
+    if err := f.Close(); err != nil {
+        return err
+    }
+    if err := os.Remove(src); err != nil {
+        return err
+    }
+
+    return nil
+}
+
 // logInfo is a convenience struct to return the filename and its embedded
 // timestamp.
 type logInfo struct {
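On the vendored Lumberjack side, the old fire-and-forget `deleteAll` goroutine is replaced by a single, lazily started "mill" goroutine that is nudged through a one-slot channel, so overlapping rotations coalesce into at most one pending compression-and-cleanup pass. The following is a stripped-down sketch of that pattern, independent of Lumberjack and with illustrative names, not the library's actual API surface.

```go
package main

import (
    "fmt"
    "sync"
    "time"
)

// worker owns a lazily started background goroutine that is poked through a
// buffered channel, mirroring lumberjack's mill/millRun/millRunOnce structure:
// pokes that arrive while a pass is running collapse into one pending pass.
type worker struct {
    startOnce sync.Once
    ch        chan bool
}

// runOnce stands in for millRunOnce: compress and delete old backups.
func (w *worker) runOnce() {
    fmt.Println("cleanup pass")
    time.Sleep(50 * time.Millisecond)
}

// run drains the channel for the life of the process, one pass per poke.
func (w *worker) run() {
    for range w.ch {
        w.runOnce()
    }
}

// poke is the analogue of Logger.mill: start the goroutine on first use,
// then do a non-blocking send so callers (rotation, writes) never wait.
func (w *worker) poke() {
    w.startOnce.Do(func() {
        w.ch = make(chan bool, 1)
        go w.run()
    })
    select {
    case w.ch <- true:
    default: // a pass is already queued; drop the extra request
    }
}

func main() {
    var w worker
    for i := 0; i < 5; i++ {
        w.poke() // e.g. from rotate() and openExistingOrNew()
    }
    time.Sleep(200 * time.Millisecond) // let the background pass run
}
```

The one-slot buffer is what keeps rotation non-blocking: a send either queues exactly one future pass or falls through to `default` because one is already queued.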
@@ -434,7 +434,7 @@
         "importpath": "gopkg.in/natefinch/lumberjack.v2",
         "repository": "https://gopkg.in/natefinch/lumberjack.v2",
         "vcs": "git",
-        "revision": "dd45e6a67c53f673bb49ca8a001fd3a63ceb640e",
+        "revision": "df99d62fd42d8b3752c8a42c6723555372c02a03",
         "branch": "v2.0",
         "notests": true
     },