Commit bf7b2548 authored by Fernando Álvarez, committed by Matt Holt

log, errors: Introduce `rotate_compress` option (#1731)

* vendor: update Lumberjack dep

* httpserver/roller: introduce rotate_compress directive

This directive enables gzip compression of rotated log files, provided by [Lumberjack](https://github.com/natefinch/lumberjack/pull/43).

The `rotate_compress` directive can be `true` or `false`, and defaults to `false`.

* httpserver/roller: remove need to set bool with rotate_compress option
parent 3bc92540
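
As an illustration only (not part of this commit), here is a minimal sketch of how the new subdirective flows through the roller API changed below. The import path is assumed to be Caddy's `httpserver` package at the time of this commit; passing an empty `where` argument relies on `ParseRoller` skipping integer parsing for `rotate_compress`, as shown in the diff.

```go
package main

import (
	"fmt"

	"github.com/mholt/caddy/caddyhttp/httpserver" // assumed import path
)

func main() {
	// Start from the defaults (rotate_size, rotate_age, rotate_keep).
	roller := httpserver.DefaultLogRoller()

	// rotate_compress is a flag with no value, so ParseRoller only
	// attempts strconv.Atoi for the other rotate_* subdirectives.
	if err := httpserver.ParseRoller(roller, "rotate_compress", ""); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	fmt.Println(roller.Compress) // true: rotated files will be gzipped
}
```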
@@ -85,13 +85,14 @@ func TestErrorsParse(t *testing.T) {
Roller: httpserver.DefaultLogRoller(),
},
}},
{`errors errors.txt { rotate_size 2 rotate_age 10 rotate_keep 3 }`, false, ErrorHandler{
{`errors errors.txt { rotate_size 2 rotate_age 10 rotate_keep 3 rotate_compress }`, false, ErrorHandler{
ErrorPages: map[int]string{},
Log: &httpserver.Logger{
Output: "errors.txt", Roller: &httpserver.LogRoller{
MaxSize: 2,
MaxAge: 10,
MaxBackups: 3,
Compress: true,
LocalTime: true,
},
},
@@ -113,6 +114,7 @@ func TestErrorsParse(t *testing.T) {
MaxSize: 3,
MaxAge: 11,
MaxBackups: 5,
Compress: false,
LocalTime: true,
},
},
......
@@ -14,6 +14,7 @@ type LogRoller struct {
MaxSize int
MaxAge int
MaxBackups int
Compress bool
LocalTime bool
}
@@ -37,6 +38,7 @@ func (l LogRoller) GetLogWriter() io.Writer {
MaxSize: l.MaxSize,
MaxAge: l.MaxAge,
MaxBackups: l.MaxBackups,
Compress: l.Compress,
LocalTime: l.LocalTime,
}
lumberjacks[absPath] = lj
@@ -48,7 +50,8 @@ func (l LogRoller) GetLogWriter() io.Writer {
func IsLogRollerSubdirective(subdir string) bool {
return subdir == directiveRotateSize ||
subdir == directiveRotateAge ||
subdir == directiveRotateKeep
subdir == directiveRotateKeep ||
subdir == directiveRotateCompress
}
// ParseRoller parses roller contents out of c.
@@ -59,7 +62,7 @@ func ParseRoller(l *LogRoller, what string, where string) error {
var value int
var err error
value, err = strconv.Atoi(where)
if err != nil {
if what != directiveRotateCompress && err != nil {
return err
}
switch what {
@@ -69,6 +72,8 @@ func ParseRoller(l *LogRoller, what string, where string) error {
l.MaxAge = value
case directiveRotateKeep:
l.MaxBackups = value
case directiveRotateCompress:
l.Compress = true
}
return nil
}
@@ -79,6 +84,7 @@ func DefaultLogRoller() *LogRoller {
MaxSize: defaultRotateSize,
MaxAge: defaultRotateAge,
MaxBackups: defaultRotateKeep,
Compress: false,
LocalTime: true,
}
}
@@ -89,10 +95,12 @@ const (
// defaultRotateAge is 14 days.
defaultRotateAge = 14
// defaultRotateKeep is 10 files.
defaultRotateKeep = 10
directiveRotateSize = "rotate_size"
directiveRotateAge = "rotate_age"
directiveRotateKeep = "rotate_keep"
defaultRotateKeep = 10
directiveRotateSize = "rotate_size"
directiveRotateAge = "rotate_age"
directiveRotateKeep = "rotate_keep"
directiveRotateCompress = "rotate_compress"
)
// lumberjacks maps log filenames to the logger
......
@@ -203,6 +203,7 @@ func TestLogParse(t *testing.T) {
MaxSize: 2,
MaxAge: 10,
MaxBackups: 3,
Compress: false,
LocalTime: true,
}},
Format: DefaultLogFormat,
......
@@ -22,6 +22,8 @@
package lumberjack
import (
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -35,6 +37,7 @@ import (
const (
backupTimeFormat = "2006-01-02T15-04-05.000"
compressSuffix = ".gz"
defaultMaxSize = 100
)
@@ -100,9 +103,16 @@ type Logger struct {
// time.
LocalTime bool `json:"localtime" yaml:"localtime"`
// Compress determines if the rotated log files should be compressed
// using gzip.
Compress bool `json:"compress" yaml:"compress"`
size int64
file *os.File
mu sync.Mutex
millCh chan bool
startMill sync.Once
}
var (
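
For context, a hedged usage sketch of the vendored logger with the new `Compress` field; the filename and limits below are invented for the example and are not taken from this commit.

```go
package main

import (
	"log"

	lumberjack "gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	log.SetOutput(&lumberjack.Logger{
		Filename:   "/var/log/myapp/app.log", // hypothetical path
		MaxSize:    100,                      // megabytes before rotation
		MaxBackups: 3,                        // rotated files to keep
		MaxAge:     14,                       // days to keep rotated files
		Compress:   true,                     // gzip rotated files (this change)
	})

	log.Println("writing to a rotating, compressing log")
}
```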
@@ -171,8 +181,8 @@ func (l *Logger) close() error {
// Rotate causes Logger to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates a cleanup of old log files according
// to the normal rules.
// SIGHUP. After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *Logger) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
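
The comment above mentions SIGHUP-driven rotation; as a sketch under the assumption of a Unix-like platform, the usual pattern looks like this (logger value and path are illustrative):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	lumberjack "gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	logger := &lumberjack.Logger{Filename: "/var/log/myapp/app.log", Compress: true}
	log.SetOutput(logger)

	// Rotate (and therefore compress the file that was just rolled) on SIGHUP,
	// e.g. from a logrotate postrotate hook.
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	go func() {
		for range hup {
			if err := logger.Rotate(); err != nil {
				log.Printf("rotate: %v", err)
			}
		}
	}()

	select {} // keep the sketch alive
}
```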
@@ -181,16 +191,16 @@ func (l *Logger) Rotate() error {
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// cleanup.
// post-rotation processing and removal.
func (l *Logger) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
return l.cleanup()
l.mill()
return nil
}
// openNew opens a new log file for writing, moving any old log file out of the
@@ -252,6 +262,8 @@ func backupName(name string, local bool) string {
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *Logger) openExistingOrNew(writeLen int) error {
l.mill()
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
@@ -285,10 +297,12 @@ func (l *Logger) filename() string {
return filepath.Join(os.TempDir(), name)
}
// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) cleanup() error {
if l.MaxBackups == 0 && l.MaxAge == 0 {
func (l *Logger) millRunOnce() error {
if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
return nil
}
@@ -297,38 +311,87 @@ func (l *Logger) cleanup() error {
return err
}
var deletes []logInfo
var compress, remove []logInfo
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
deletes = files[l.MaxBackups:]
files = files[:l.MaxBackups]
preserved := make(map[string]bool)
var remaining []logInfo
for _, f := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := f.Name()
if strings.HasSuffix(fn, compressSuffix) {
fn = fn[:len(fn)-len(compressSuffix)]
}
preserved[fn] = true
if len(preserved) > l.MaxBackups {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
if l.MaxAge > 0 {
diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
cutoff := currentTime().Add(-1 * diff)
var remaining []logInfo
for _, f := range files {
if f.timestamp.Before(cutoff) {
deletes = append(deletes, f)
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
if len(deletes) == 0 {
return nil
if l.Compress {
for _, f := range files {
if !strings.HasSuffix(f.Name(), compressSuffix) {
compress = append(compress, f)
}
}
}
go deleteAll(l.dir(), deletes)
for _, f := range remove {
errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
if err == nil && errRemove != nil {
err = errRemove
}
}
for _, f := range compress {
fn := filepath.Join(l.dir(), f.Name())
errCompress := compressLogFile(fn, fn+compressSuffix)
if err == nil && errCompress != nil {
err = errCompress
}
}
return nil
return err
}
func deleteAll(dir string, files []logInfo) {
// remove files on a separate goroutine
for _, f := range files {
// millRun runs in a goroutine to manage post-rotation compression and removal
// of old log files.
func (l *Logger) millRun() {
for _ = range l.millCh {
// what am I going to do, log this?
_ = os.Remove(filepath.Join(dir, f.Name()))
_ = l.millRunOnce()
}
}
// mill performs post-rotation compression and removal of stale log files,
// starting the mill goroutine if necessary.
func (l *Logger) mill() {
l.startMill.Do(func() {
l.millCh = make(chan bool, 1)
go l.millRun()
})
select {
case l.millCh <- true:
default:
}
}
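
`mill`/`millRun` above implement a coalescing-signal pattern: a buffered channel of capacity one means at most one compression-and-cleanup pass is queued while another runs, and extra triggers are dropped. A generic sketch of the same idea, separate from lumberjack (names here are invented for the example):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// worker drains the signal channel; each received value triggers one pass.
type worker struct {
	once sync.Once
	ch   chan struct{}
}

func (w *worker) kick() {
	w.once.Do(func() {
		w.ch = make(chan struct{}, 1) // capacity 1: extra kicks coalesce
		go func() {
			for range w.ch {
				fmt.Println("running one maintenance pass")
			}
		}()
	})
	select {
	case w.ch <- struct{}{}: // queue a pass if none is pending
	default: // a pass is already queued; drop the signal
	}
}

func main() {
	var w worker
	for i := 0; i < 5; i++ {
		w.kick()
	}
	time.Sleep(100 * time.Millisecond) // give the goroutine time to run in this sketch
}
```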
@@ -347,13 +410,13 @@ func (l *Logger) oldLogFiles() ([]logInfo, error) {
if f.IsDir() {
continue
}
name := l.timeFromName(f.Name(), prefix, ext)
if name == "" {
if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
logFiles = append(logFiles, logInfo{t, f})
continue
}
t, err := time.Parse(backupTimeFormat, name)
if err == nil {
if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
logFiles = append(logFiles, logInfo{t, f})
continue
}
// error parsing means that the suffix at the end was not generated
// by lumberjack, and therefore it's not a backup file.
@@ -367,17 +430,15 @@ func (l *Logger) oldLogFiles() ([]logInfo, error) {
// timeFromName extracts the formatted time from the filename by stripping off
// the filename's prefix and extension. This prevents someone's filename from
// confusing time.parse.
func (l *Logger) timeFromName(filename, prefix, ext string) string {
func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
if !strings.HasPrefix(filename, prefix) {
return ""
return time.Time{}, errors.New("mismatched prefix")
}
filename = filename[len(prefix):]
if !strings.HasSuffix(filename, ext) {
return ""
return time.Time{}, errors.New("mismatched extension")
}
filename = filename[:len(filename)-len(ext)]
return filename
ts := filename[len(prefix) : len(filename)-len(ext)]
return time.Parse(backupTimeFormat, ts)
}
// max returns the maximum size in bytes of log files before rolling.
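
For reference, a small sketch of the filename round-trip that `timeFromName` now handles for both plain and compressed backups; the example names, prefix, and extension are made up, while `backupTimeFormat` and `compressSuffix` mirror the constants above.

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

const (
	backupTimeFormat = "2006-01-02T15-04-05.000"
	compressSuffix   = ".gz"
)

func main() {
	// A rotated file and its compressed counterpart for a logger
	// writing to "app.log" (prefix "app-", extension ".log").
	names := []string{
		"app-2017-05-09T15-04-05.000.log",
		"app-2017-05-09T15-04-05.000.log" + compressSuffix,
	}

	prefix, ext := "app-", ".log"
	for _, name := range names {
		// Try the plain extension first, then the compressed one,
		// mirroring the two timeFromName calls in oldLogFiles.
		for _, e := range []string{ext, ext + compressSuffix} {
			if strings.HasPrefix(name, prefix) && strings.HasSuffix(name, e) {
				ts := name[len(prefix) : len(name)-len(e)]
				if t, err := time.Parse(backupTimeFormat, ts); err == nil {
					fmt.Println(name, "->", t.Format(time.RFC3339))
					break
				}
			}
		}
	}
}
```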
@@ -402,6 +463,61 @@ func (l *Logger) prefixAndExt() (prefix, ext string) {
return prefix, ext
}
// compressLogFile compresses the given log file, removing the
// uncompressed log file if successful.
func compressLogFile(src, dst string) (err error) {
f, err := os.Open(src)
if err != nil {
return fmt.Errorf("failed to open log file: %v", err)
}
defer f.Close()
fi, err := os_Stat(src)
if err != nil {
return fmt.Errorf("failed to stat log file: %v", err)
}
if err := chown(dst, fi); err != nil {
return fmt.Errorf("failed to chown compressed log file: %v", err)
}
// If this file already exists, we presume it was created by
// a previous attempt to compress the log file.
gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
if err != nil {
return fmt.Errorf("failed to open compressed log file: %v", err)
}
defer gzf.Close()
gz := gzip.NewWriter(gzf)
defer func() {
if err != nil {
os.Remove(dst)
err = fmt.Errorf("failed to compress log file: %v", err)
}
}()
if _, err := io.Copy(gz, f); err != nil {
return err
}
if err := gz.Close(); err != nil {
return err
}
if err := gzf.Close(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
if err := os.Remove(src); err != nil {
return err
}
return nil
}
// logInfo is a convenience struct to return the filename and its embedded
// timestamp.
type logInfo struct {
......
@@ -434,7 +434,7 @@
"importpath": "gopkg.in/natefinch/lumberjack.v2",
"repository": "https://gopkg.in/natefinch/lumberjack.v2",
"vcs": "git",
"revision": "dd45e6a67c53f673bb49ca8a001fd3a63ceb640e",
"revision": "df99d62fd42d8b3752c8a42c6723555372c02a03",
"branch": "v2.0",
"notests": true
},
......