Commit b2bc7b9c authored by Kamil Trzciński

Merge branch 'fix-logrus-secrets-cleanup' into 'master'

Fix logrus secrets cleanup

See merge request gitlab-org/gitlab-runner!990
parents edd7ee23 7c2cbbb7
Pipeline #27492805 passed with stages in 28 minutes and 26 seconds
......@@ -421,11 +421,9 @@
[[projects]]
name = "github.com/sirupsen/logrus"
packages = [
".",
"hooks/test"
]
revision = "8c0189d9f6bbf301e5d055d34268156b317016af"
packages = ["."]
revision = "3e01752db0189b9157070a0e1668a620f9a85da2"
version = "v1.0.6"
[[projects]]
name = "github.com/spf13/pflag"
......@@ -700,6 +698,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "4523543d31b7003f03a29eafa27f81595300e5ccb0e1addea605bbebb77c326e"
inputs-digest = "2f7c08f6c7e538cf19f9a9835d18574927a03b07f30557ea80c97d7a6151a875"
solver-name = "gps-cdcl"
solver-version = 1
......@@ -32,9 +32,7 @@ ignored = ["test", "appengine"]
[[constraint]]
name = "github.com/sirupsen/logrus"
# version = "1.0.4"
# 1.0.4 has a race condition on panic in logrus hooks that is triggered by our test suite
revision = "8c0189d9f6bbf301e5d055d34268156b317016af"
version = "1.0.6"
[[constraint]]
branch = "master"
......
......@@ -9,14 +9,7 @@ import (
type SecretsCleanupHook struct{}
func (s *SecretsCleanupHook) Levels() []logrus.Level {
return []logrus.Level{
logrus.PanicLevel,
logrus.FatalLevel,
logrus.ErrorLevel,
logrus.WarnLevel,
logrus.InfoLevel,
logrus.DebugLevel,
}
return logrus.AllLevels
}
func (s *SecretsCleanupHook) Fire(entry *logrus.Entry) error {
......
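For readers unfamiliar with logrus hooks, here is a self-contained sketch in the spirit of SecretsCleanupHook: it fires on every level and rewrites the entry message in place. The body of Fire is elided above, so the scrubber type, the regexp, and the replacement below are illustrative assumptions, not the runner's actual implementation.

package main

import (
	"regexp"

	"github.com/sirupsen/logrus"
)

// scrubber is an illustrative stand-in for SecretsCleanupHook: it fires on
// every level and rewrites the entry message in place before formatting.
type scrubber struct{}

var tokenRe = regexp.MustCompile(`([?&]private_token)=[^& ]*`)

func (scrubber) Levels() []logrus.Level { return logrus.AllLevels }

func (scrubber) Fire(e *logrus.Entry) error {
	e.Message = tokenRe.ReplaceAllString(e.Message, "$1=[FILTERED]")
	return nil
}

func main() {
	logrus.AddHook(scrubber{})
	// With logrus v1.0.6 the scrubbed message is what gets formatted.
	logrus.Errorln("Get http://localhost/?private_token=abcd1234 failed")
}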
package cli_helpers_test
import (
"bytes"
"testing"
logrus_test "github.com/sirupsen/logrus/hooks/test"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"gitlab.com/gitlab-org/gitlab-runner/helpers/cli"
......@@ -23,19 +24,21 @@ func TestSecretsCleanupHook(t *testing.T) {
{
name: "No Secrets",
message: "Fatal: Get http://localhost/?id=123",
expected: "Get http://localhost/?id=123",
expected: "Fatal: Get http://localhost/?id=123",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logger, hook := logrus_test.NewNullLogger()
buffer := &bytes.Buffer{}
logger := logrus.New()
logger.Out = buffer
logger.AddHook(&cli_helpers.SecretsCleanupHook{})
logger.Errorln(test.message)
entry := hook.LastEntry()
assert.NotNil(t, entry)
assert.Contains(t, entry.Message, test.expected)
assert.Contains(t, buffer.String(), test.expected)
})
}
}
......@@ -4,7 +4,7 @@ import (
"regexp"
)
var scrubRegexp = regexp.MustCompile(`(?im)([\?&]((?:private|authenticity|rss)[\-_]token)|X-AMZ-Signature)=[^&]*`)
var scrubRegexp = regexp.MustCompile(`(?im)([\?&]((?:private|authenticity|rss)[\-_]token)|X-AMZ-Signature)=[^& ]*`)
// ScrubSecrets replaces the content of any sensitive query string parameters
// in a URL with `[FILTERED]`
......
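The only change to the pattern is the space added to the negated character class, so a match now ends at the first space as well as at the next '&'. A small standalone sketch of the effect (the replacement string mirrors what ScrubSecrets is documented to do, but is an assumption here):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldRe := regexp.MustCompile(`(?im)([\?&]((?:private|authenticity|rss)[\-_]token)|X-AMZ-Signature)=[^&]*`)
	newRe := regexp.MustCompile(`(?im)([\?&]((?:private|authenticity|rss)[\-_]token)|X-AMZ-Signature)=[^& ]*`)

	msg := "Get http://localhost/?private_token=abcd1234 test"

	// Old pattern swallows everything up to the next '&', including " test".
	fmt.Println(oldRe.ReplaceAllString(msg, "$1=[FILTERED]"))
	// Get http://localhost/?private_token=[FILTERED]

	// New pattern stops at the space, so trailing text survives.
	fmt.Println(newRe.ReplaceAllString(msg, "$1=[FILTERED]"))
	// Get http://localhost/?private_token=[FILTERED] test
}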
......@@ -13,6 +13,7 @@ func TestScrubSecrets(t *testing.T) {
}{
{input: "Get http://localhost/?id=123", output: "Get http://localhost/?id=123"},
{input: "Get http://localhost/?id=123&X-Amz-Signature=abcd1234&private_token=abcd1234", output: "Get http://localhost/?id=123&X-Amz-Signature=[FILTERED]&private_token=[FILTERED]"},
{input: "Get http://localhost/?private_token=abcd1234 test", output: "Get http://localhost/?private_token=[FILTERED] test"},
}
for _, example := range examples {
......
......@@ -48,7 +48,7 @@ type Entry struct {
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
// Default is five fields, give a little extra room
Data: make(Fields, 5),
}
}
......@@ -83,14 +83,28 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
}
// Overrides the time of the Entry.
func (entry *Entry) WithTime(t time.Time) *Entry {
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
// Default to now, but allow users to override if they want.
//
// We don't have to worry about polluting future calls to Entry#log()
// with this assignment because this function is declared with a
// non-pointer receiver.
if entry.Time.IsZero() {
entry.Time = time.Now()
}
entry.Level = level
entry.Message = msg
......@@ -113,12 +127,10 @@ func (entry Entry) log(level Level, msg string) {
}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) fireHooks() {
func (entry *Entry) fireHooks() {
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
err := entry.Logger.Hooks.Fire(entry.Level, &entry)
err := entry.Logger.Hooks.Fire(entry.Level, entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
}
......
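Two upstream changes here matter for the runner. First, log() only stamps time.Now() when no time was already set, which is what makes WithTime possible. Second, fireHooks() now uses a pointer receiver and passes the entry itself to Hooks.Fire, so a hook that rewrites entry.Message mutates the same Entry that log() goes on to format; previously the rewrite landed on a discarded copy. A hedged sketch of the visible difference (the hook and message below are illustrative):

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

// redact rewrites the message inside Fire. With the pointer-receiver
// fireHooks, the rewritten message is what reaches the formatter.
type redact struct{}

func (redact) Levels() []logrus.Level { return logrus.AllLevels }

func (redact) Fire(e *logrus.Entry) error {
	e.Message = strings.Replace(e.Message, "secret", "[FILTERED]", -1)
	return nil
}

func main() {
	buf := &bytes.Buffer{}
	logger := logrus.New()
	logger.Out = buf
	logger.AddHook(redact{})

	logger.Info("token=secret")
	fmt.Println(strings.Contains(buf.String(), "[FILTERED]")) // true on v1.0.6
}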
......@@ -2,6 +2,7 @@ package logrus
import (
"io"
"time"
)
var (
......@@ -15,9 +16,7 @@ func StandardLogger() *Logger {
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
std.SetOutput(out)
}
// SetFormatter sets the standard logger formatter.
......@@ -72,6 +71,15 @@ func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// WithTime creates an entry from the standard logger and overrides the time of
// logs generated with it.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithTime(t time.Time) *Entry {
return std.WithTime(t)
}
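Like WithField, the new package-level WithTime only builds an Entry; nothing is written until a level method is called on it. A minimal hedged usage sketch (the timestamp and field are illustrative):

package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	// Log an event with its original timestamp instead of time.Now().
	eventTime := time.Date(2018, time.July, 1, 12, 0, 0, 0, time.UTC)
	logrus.WithTime(eventTime).WithField("job", 42).Info("replayed event")
}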
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
......@@ -107,7 +115,7 @@ func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
// Fatal logs a message at level Fatal on the standard logger, then the process will exit with status set to 1.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
......@@ -147,7 +155,7 @@ func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
// Fatalf logs a message at level Fatal on the standard logger, then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
......@@ -187,7 +195,7 @@ func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
// Fatalln logs a message at level Fatal on the standard logger, then the process will exit with status set to 1.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}
......@@ -30,16 +30,22 @@ type Formatter interface {
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
if t, ok := data["time"]; ok {
data["fields.time"] = t
func prefixFieldClashes(data Fields, fieldMap FieldMap) {
timeKey := fieldMap.resolve(FieldKeyTime)
if t, ok := data[timeKey]; ok {
data["fields."+timeKey] = t
delete(data, timeKey)
}
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
msgKey := fieldMap.resolve(FieldKeyMsg)
if m, ok := data[msgKey]; ok {
data["fields."+msgKey] = m
delete(data, msgKey)
}
if l, ok := data["level"]; ok {
data["fields.level"] = l
levelKey := fieldMap.resolve(FieldKeyLevel)
if l, ok := data[levelKey]; ok {
data["fields."+levelKey] = l
delete(data, levelKey)
}
}
......@@ -15,7 +15,7 @@ type Hook struct {
// Entries is an array of all entries that have been received by this hook.
// For safe access, use the AllEntries() method, rather than reading this
// value directly.
Entries []*logrus.Entry
Entries []logrus.Entry
mu sync.RWMutex
}
......@@ -52,7 +52,7 @@ func NewNullLogger() (*logrus.Logger, *Hook) {
func (t *Hook) Fire(e *logrus.Entry) error {
t.mu.Lock()
defer t.mu.Unlock()
t.Entries = append(t.Entries, e)
t.Entries = append(t.Entries, *e)
return nil
}
......@@ -68,9 +68,7 @@ func (t *Hook) LastEntry() *logrus.Entry {
if i < 0 {
return nil
}
// Make a copy, for safety
e := *t.Entries[i]
return &e
return &t.Entries[i]
}
// AllEntries returns all entries that were logged.
......@@ -79,10 +77,9 @@ func (t *Hook) AllEntries() []*logrus.Entry {
defer t.mu.RUnlock()
// Make a copy so the returned value won't race with future log requests
entries := make([]*logrus.Entry, len(t.Entries))
for i, entry := range t.Entries {
for i := 0; i < len(t.Entries); i++ {
// Make a copy, for safety
e := *entry
entries[i] = &e
entries[i] = &t.Entries[i]
}
return entries
}
......@@ -91,5 +88,5 @@ func (t *Hook) AllEntries() []*logrus.Entry {
func (t *Hook) Reset() {
t.mu.Lock()
defer t.mu.Unlock()
t.Entries = make([]*logrus.Entry, 0)
t.Entries = make([]logrus.Entry, 0)
}
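Storing Entries by value means the test hook now records a snapshot of each entry at Fire time instead of keeping pointers it shares with the logger, which sidesteps the data race the old runner pin worked around. Day-to-day usage of the null logger is unchanged; a brief sketch (values in the comments are illustrative):

package main

import (
	"fmt"

	"github.com/sirupsen/logrus/hooks/test"
)

func main() {
	logger, hook := test.NewNullLogger()
	logger.Error("boom")

	// LastEntry returns a pointer into the hook's own copy.
	fmt.Println(hook.LastEntry().Message) // "boom"
	fmt.Println(len(hook.AllEntries()))   // 1

	hook.Reset()
	fmt.Println(len(hook.AllEntries())) // 0
}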
package test
import (
"math/rand"
"sync"
"testing"
"time"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
......@@ -38,23 +40,33 @@ func TestAllHooks(t *testing.T) {
}
func TestLoggingWithHooksRace(t *testing.T) {
rand.Seed(time.Now().Unix())
unlocker := rand.Int() % 100
assert := assert.New(t)
logger, hook := NewNullLogger()
var wg sync.WaitGroup
wg.Add(100)
var wgOne, wgAll sync.WaitGroup
wgOne.Add(1)
wgAll.Add(100)
for i := 0; i < 100; i++ {
go func() {
go func(i int) {
logger.Info("info")
wg.Done()
}()
wgAll.Done()
if i == unlocker {
wgOne.Done()
}
}(i)
}
wgOne.Wait()
assert.Equal(logrus.InfoLevel, hook.LastEntry().Level)
assert.Equal("info", hook.LastEntry().Message)
wg.Wait()
wgAll.Wait()
entries := hook.AllEntries()
assert.Equal(100, len(entries))
......
......@@ -33,6 +33,9 @@ type JSONFormatter struct {
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
DataKey string
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &JSONFormatter{
......@@ -58,7 +61,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data[k] = v
}
}
prefixFieldClashes(data)
if f.DataKey != "" {
newData := make(Fields, 4)
newData[f.DataKey] = data
data = newData
}
prefixFieldClashes(data, f.FieldMap)
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
......
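DataKey nests all user-supplied fields under one key, so they can no longer clash with the formatter's own time/level/msg entries. A brief hedged sketch of the resulting shape (the key name "args" and the fields are illustrative):

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.Out = os.Stdout
	logger.Formatter = &logrus.JSONFormatter{DataKey: "args"}

	logger.WithFields(logrus.Fields{"level": "level", "test": "test"}).Info("nested")
	// Roughly: {"args":{"level":"level","test":"test"},"level":"info","msg":"nested","time":"..."}
}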
......@@ -3,6 +3,7 @@ package logrus
import (
"encoding/json"
"errors"
"fmt"
"strings"
"testing"
)
......@@ -106,6 +107,102 @@ func TestFieldClashWithLevel(t *testing.T) {
}
}
func TestFieldClashWithRemappedFields(t *testing.T) {
formatter := &JSONFormatter{
FieldMap: FieldMap{
FieldKeyTime: "@timestamp",
FieldKeyLevel: "@level",
FieldKeyMsg: "@message",
},
}
b, err := formatter.Format(WithFields(Fields{
"@timestamp": "@timestamp",
"@level": "@level",
"@message": "@message",
"timestamp": "timestamp",
"level": "level",
"msg": "msg",
}))
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
for _, field := range []string{"timestamp", "level", "msg"} {
if entry[field] != field {
t.Errorf("Expected field %v to be untouched; got %v", field, entry[field])
}
remappedKey := fmt.Sprintf("fields.%s", field)
if remapped, ok := entry[remappedKey]; ok {
t.Errorf("Expected %s to be empty; got %v", remappedKey, remapped)
}
}
for _, field := range []string{"@timestamp", "@level", "@message"} {
if entry[field] == field {
t.Errorf("Expected field %v to be mapped to an Entry value", field)
}
remappedKey := fmt.Sprintf("fields.%s", field)
if remapped, ok := entry[remappedKey]; ok {
if remapped != field {
t.Errorf("Expected field %v to be copied to %s; got %v", field, remappedKey, remapped)
}
} else {
t.Errorf("Expected field %v to be copied to %s; was absent", field, remappedKey)
}
}
}
func TestFieldsInNestedDictionary(t *testing.T) {
formatter := &JSONFormatter{
DataKey: "args",
}
logEntry := WithFields(Fields{
"level": "level",
"test": "test",
})
logEntry.Level = InfoLevel
b, err := formatter.Format(logEntry)
if err != nil {
t.Fatal("Unable to format entry: ", err)
}
entry := make(map[string]interface{})
err = json.Unmarshal(b, &entry)
if err != nil {
t.Fatal("Unable to unmarshal formatted entry: ", err)
}
args := entry["args"].(map[string]interface{})
for _, field := range []string{"test", "level"} {
if value, present := args[field]; !present || value != field {
t.Errorf("Expected field %v to be present under 'args'; untouched", field)
}
}
for _, field := range []string{"test", "fields.level"} {
if _, present := entry[field]; present {
t.Errorf("Expected field %v not to be present at top level", field)
}
}
// with nested object, "level" shouldn't clash
if entry["level"] != "info" {
t.Errorf("Expected 'level' field to contain 'info'")
}
}
func TestJSONEntryEndsWithNewline(t *testing.T) {
formatter := &JSONFormatter{}
......
......@@ -5,6 +5,7 @@ import (
"os"
"sync"
"sync/atomic"
"time"
)
type Logger struct {
......@@ -88,7 +89,7 @@ func (logger *Logger) releaseEntry(entry *Entry) {
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
......@@ -112,6 +113,13 @@ func (logger *Logger) WithError(err error) *Entry {
return entry.WithError(err)
}
// Overrides the time of the log entry.
func (logger *Logger) WithTime(t time.Time) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithTime(t)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.level() >= DebugLevel {
entry := logger.newEntry()
......@@ -316,6 +324,12 @@ func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
func (logger *Logger) SetOutput(out io.Writer) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Out = out
}
func (logger *Logger) AddHook(hook Hook) {
logger.mu.Lock()
defer logger.mu.Unlock()
......
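SetOutput acquires the logger mutex, so the output destination can now be swapped safely while other goroutines are logging; the package-level SetOutput shown earlier simply delegates to it. A minimal hedged usage sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	var buf bytes.Buffer
	logger.SetOutput(&buf) // safe to call concurrently with logging

	logger.Info("captured")
	fmt.Print(buf.String())
}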
......@@ -7,6 +7,7 @@ import (
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
......@@ -209,6 +210,65 @@ func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
})
}
func TestWithTimeShouldOverrideTime(t *testing.T) {
now := time.Now().Add(24 * time.Hour)
LogAndAssertJSON(t, func(log *Logger) {
log.WithTime(now).Info("foobar")
}, func(fields Fields) {
assert.Equal(t, fields["time"], now.Format(defaultTimestampFormat))
})
}
func TestWithTimeShouldNotOverrideFields(t *testing.T) {
now := time.Now().Add(24 * time.Hour)
LogAndAssertJSON(t, func(log *Logger) {
log.WithField("herp", "derp").WithTime(now).Info("blah")
}, func(fields Fields) {
assert.Equal(t, fields["time"], now.Format(defaultTimestampFormat))
assert.Equal(t, fields["herp"], "derp")
})
}
func TestWithFieldShouldNotOverrideTime(t *testing.T) {
now := time.Now().Add(24 * time.Hour)
LogAndAssertJSON(t, func(log *Logger) {
log.WithTime(now).WithField("herp", "derp").Info("blah")
}, func(fields Fields) {
assert.Equal(t, fields["time"], now.Format(defaultTimestampFormat))
assert.Equal(t, fields["herp"], "derp")
})
}
func TestTimeOverrideMultipleLogs(t *testing.T) {
var buffer bytes.Buffer
var firstFields, secondFields Fields
logger := New()
logger.Out = &buffer
formatter := new(JSONFormatter)
formatter.TimestampFormat = time.StampMilli
logger.Formatter = formatter
llog := logger.WithField("herp", "derp")
llog.Info("foo")
err := json.Unmarshal(buffer.Bytes(), &firstFields)
assert.NoError(t, err, "should have decoded first message")
buffer.Reset()
time.Sleep(10 * time.Millisecond)
llog.Info("bar")
err = json.Unmarshal(buffer.Bytes(), &secondFields)
assert.NoError(t, err, "should have decoded second message")
assert.NotEqual(t, firstFields["time"], secondFields["time"], "timestamps should not be equal")
}
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
var buffer bytes.Buffer
......
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
// +build !appengine,!gopherjs
package logrus
......
// +build appengine
// +build appengine gopherjs
package logrus
......
// +build !appengine
// +build !appengine,!gopherjs
package logrus
......
......@@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build !appengine,!gopherjs
package logrus
......
......@@ -20,6 +20,7 @@ const (
var (
baseTimestamp time.Time
emptyFieldMap FieldMap
)
func init() {
......@@ -50,12 +51,24 @@ type TextFormatter struct {
// be desired.
DisableSorting bool
// Disables the truncation of the level text to 4 characters.
DisableLevelTruncation bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// Whether the logger's out is to a terminal
isTerminal bool
// FieldMap allows users to customize the names of keys for default fields.
// As an example:
// formatter := &TextFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message"}}
FieldMap FieldMap
sync.Once
}
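With FieldMap on TextFormatter, the key remapping that JSONFormatter already supported applies to text output too, and the prefixFieldClashes call (moved to the top of Format below) respects the remapped names. A brief hedged configuration sketch (the "@"-prefixed keys are illustrative):

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.Out = os.Stdout
	logger.Formatter = &logrus.TextFormatter{
		// DisableLevelTruncation: true would keep "warning" instead of
		// "warn" in colored terminal output.
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}

	logger.WithField("job", 42).Warn("remapped keys")
	// Non-colored output looks roughly like:
	// @timestamp="..." @level=warning @message="remapped keys" job=42
}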
......@@ -67,7 +80,8 @@ func (f *TextFormatter) init(entry *Entry) {
// Format renders a single log entry
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
prefixFieldClashes(entry.Data, f.FieldMap)
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
......@@ -76,14 +90,14 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
if !f.DisableSorting {
sort.Strings(keys)
}
var b *bytes.Buffer
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
......@@ -96,11 +110,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
f.appendKeyValue(b, f.FieldMap