feat: stack log prefixes to differentiate between log emitters

The goal is to be able to tell apart logs from different sources, such
as `bootstrap.parallel_best_resolver` and `parallel_best_resolver`.
This commit is contained in:
ThinkChaos 2022-12-02 22:21:17 -05:00
parent e47a8b7f60
commit 7c76836373
12 changed files with 46 additions and 31 deletions

View File

@ -3,6 +3,7 @@ package log
//go:generate go run github.com/abice/go-enum -f=$GOFILE --marshal --names
import (
"fmt"
"io"
"strings"
@ -10,6 +11,8 @@ import (
prefixed "github.com/x-cray/logrus-prefixed-formatter"
)
const prefixField = "prefix"
// Logger is the global logging instance
//
//nolint:gochecknoglobals
@ -60,7 +63,16 @@ func Log() *logrus.Logger {
// PrefixedLog return the global logger with prefix
func PrefixedLog(prefix string) *logrus.Entry {
return logger.WithField("prefix", prefix)
return logger.WithField(prefixField, prefix)
}
// WithPrefix returns a logger entry tagged with the given prefix.
// If the entry already carries a prefix, the new one is appended to it,
// dot-separated, so nested emitters stack (e.g. "bootstrap.parallel_best_resolver").
func WithPrefix(logger *logrus.Entry, prefix string) *logrus.Entry {
	if existing, ok := logger.Data[prefixField]; ok {
		return logger.WithField(prefixField, fmt.Sprintf("%s.%s", existing, prefix))
	}

	return logger.WithField(prefixField, prefix)
}
// EscapeInput removes line breaks from input

View File

@ -164,7 +164,7 @@ func createDownloader(cfg config.BlockingConfig, bootstrap *Bootstrap) *lists.HT
}
func setupRedisEnabledSubscriber(c *BlockingResolver) {
logger := logger("blocking_resolver")
logger := log.PrefixedLog("blocking_resolver")
go func() {
for em := range c.redisClient.EnabledChannel {
@ -410,7 +410,7 @@ func (r *BlockingResolver) handleBlacklist(groupsToCheck []string,
// Resolve checks the query against the blacklist and delegates to next resolver if domain is not blocked
func (r *BlockingResolver) Resolve(request *model.Request) (*model.Response, error) {
logger := withPrefix(request.Log, "blacklist_resolver")
logger := log.WithPrefix(request.Log, "blacklist_resolver")
groupsToCheck := r.groupsToCheckForClient(request)
if len(groupsToCheck) > 0 {

View File

@ -10,6 +10,7 @@ import (
"github.com/0xERR0R/blocky/cache/expirationcache"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/evt"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/redis"
"github.com/0xERR0R/blocky/util"
@ -79,7 +80,7 @@ func configureCaches(c *CachingResolver, cfg *config.CachingConfig) {
}
func setupRedisCacheSubscriber(c *CachingResolver) {
logger := logger("caching_resolver")
logger := log.PrefixedLog("caching_resolver")
go func() {
for rc := range c.redisClient.CacheChannel {
@ -101,7 +102,7 @@ func (r *CachingResolver) isPrefetchingDomain(cacheKey string) bool {
func (r *CachingResolver) onExpired(cacheKey string) (val interface{}, ttl time.Duration) {
qType, domainName := util.ExtractCacheKey(cacheKey)
logger := logger("caching_resolver")
logger := log.PrefixedLog("caching_resolver")
if r.isPrefetchingDomain(cacheKey) {
logger.Debugf("prefetching '%s' (%s)", util.Obfuscate(domainName), qType.String())
@ -151,7 +152,7 @@ func (r *CachingResolver) Configuration() (result []string) {
// Resolve checks if the current query result is already in the cache and returns it
// or delegates to the next resolver
func (r *CachingResolver) Resolve(request *model.Request) (response *model.Response, err error) {
logger := withPrefix(request.Log, "caching_resolver")
logger := log.WithPrefix(request.Log, "caching_resolver")
if r.maxCacheTimeSec < 0 {
logger.Debug("skip cache")

View File

@ -8,6 +8,7 @@ import (
"github.com/0xERR0R/blocky/cache/expirationcache"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/util"
@ -101,7 +102,7 @@ func (r *ClientNamesResolver) getClientNames(request *model.Request) []string {
}
}
names := r.resolveClientNames(ip, withPrefix(request.Log, "client_names_resolver"))
names := r.resolveClientNames(ip, log.WithPrefix(request.Log, "client_names_resolver"))
r.cache.Put(ip.String(), names, time.Hour)
return names

View File

@ -5,6 +5,7 @@ import (
"strings"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/util"
@ -82,7 +83,7 @@ func (r *ConditionalUpstreamResolver) processRequest(request *model.Request) (bo
// Resolve uses the conditional resolver to resolve the query
func (r *ConditionalUpstreamResolver) Resolve(request *model.Request) (*model.Response, error) {
logger := withPrefix(request.Log, "conditional_resolver")
logger := log.WithPrefix(request.Log, "conditional_resolver")
if len(r.mapping) > 0 {
resolved, resp, err := r.processRequest(request)
@ -100,7 +101,7 @@ func (r *ConditionalUpstreamResolver) internalResolve(reso Resolver, doFQ, do st
req *model.Request,
) (*model.Response, error) {
// internal request resolution
logger := withPrefix(req.Log, "conditional_resolver")
logger := log.WithPrefix(req.Log, "conditional_resolver")
req.Req.Question[0].Name = dns.Fqdn(doFQ)
response, err := reso.Resolve(req)

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/util"
@ -89,7 +90,7 @@ func (r *CustomDNSResolver) handleReverseDNS(request *model.Request) *model.Resp
}
func (r *CustomDNSResolver) processRequest(request *model.Request) *model.Response {
logger := withPrefix(request.Log, "custom_dns_resolver")
logger := log.WithPrefix(request.Log, "custom_dns_resolver")
response := new(dns.Msg)
response.SetReply(request.Req)
@ -138,7 +139,7 @@ func (r *CustomDNSResolver) processRequest(request *model.Request) *model.Respon
// Resolve uses internal mapping to resolve the query
func (r *CustomDNSResolver) Resolve(request *model.Request) (*model.Response, error) {
logger := withPrefix(request.Log, "custom_dns_resolver")
logger := log.WithPrefix(request.Log, "custom_dns_resolver")
reverseResp := r.handleReverseDNS(request)
if reverseResp != nil {

View File

@ -8,6 +8,7 @@ import (
"time"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/util"
"github.com/miekg/dns"
@ -64,7 +65,7 @@ func (r *HostsFileResolver) handleReverseDNS(request *model.Request) *model.Resp
}
func (r *HostsFileResolver) Resolve(request *model.Request) (*model.Response, error) {
logger := withPrefix(request.Log, hostsFileResolverLogger)
logger := log.WithPrefix(request.Log, hostsFileResolverLogger)
if r.HostsFilePath == "" {
return r.next.Resolve(request)
@ -143,7 +144,7 @@ func NewHostsFileResolver(cfg config.HostsFileConfig) *HostsFileResolver {
}
if err := r.parseHostsFile(); err != nil {
logger := logger(hostsFileResolverLogger)
logger := log.PrefixedLog(hostsFileResolverLogger)
logger.Warnf("cannot parse hosts file: %s, hosts file resolving is disabled", r.HostsFilePath)
r.HostsFilePath = ""
} else {
@ -234,7 +235,7 @@ func (r *HostsFileResolver) periodicUpdate() {
for {
<-ticker.C
logger := logger(hostsFileResolverLogger)
logger := log.PrefixedLog(hostsFileResolverLogger)
logger.WithField("file", r.HostsFilePath).Debug("refreshing hosts file")
util.LogOnError("can't refresh hosts file: ", r.parseHostsFile())

View File

@ -10,6 +10,7 @@ import (
"time"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/util"
"github.com/miekg/dns"
@ -78,7 +79,7 @@ func testResolver(r *UpstreamResolver) error {
func NewParallelBestResolver(
upstreamResolvers map[string][]config.Upstream, bootstrap *Bootstrap, shouldVerifyUpstreams bool,
) (Resolver, error) {
logger := logger(parallelResolverLogger)
logger := log.PrefixedLog(parallelResolverLogger)
resolverGroups := make(map[string][]Resolver, len(upstreamResolvers))
@ -205,7 +206,7 @@ func (r *ParallelBestResolver) resolversForClient(request *model.Request) (resul
// Resolve sends the query request to multiple upstream resolvers and returns the fastest result
func (r *ParallelBestResolver) Resolve(request *model.Request) (*model.Response, error) {
logger := request.Log.WithField("prefix", parallelResolverLogger)
logger := log.WithPrefix(request.Log, parallelResolverLogger)
resolvers := r.resolversForClient(request)

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/querylog"
"github.com/0xERR0R/blocky/util"
@ -32,6 +33,8 @@ type QueryLoggingResolver struct {
// NewQueryLoggingResolver returns a new resolver instance
func NewQueryLoggingResolver(cfg config.QueryLogConfig) ChainedResolver {
logger := log.PrefixedLog(queryLoggingResolverPrefix)
var writer querylog.Writer
logType := cfg.Type
@ -60,11 +63,12 @@ func NewQueryLoggingResolver(cfg config.QueryLogConfig) ChainedResolver {
retry.DelayType(retry.FixedDelay),
retry.Delay(time.Duration(cfg.CreationCooldown)),
retry.OnRetry(func(n uint, err error) {
logger(queryLoggingResolverPrefix).Warnf("Error occurred on query writer creation, "+
"retry attempt %d/%d: %v", n+1, cfg.CreationAttempts, err)
logger.Warnf(
"Error occurred on query writer creation, retry attempt %d/%d: %v", n+1, cfg.CreationAttempts, err,
)
}))
if err != nil {
logger(queryLoggingResolverPrefix).Error("can't create query log writer, using console as fallback: ", err)
logger.Error("can't create query log writer, using console as fallback: ", err)
writer = querylog.NewLoggerWriter()
logType = config.QueryLogTypeConsole
@ -125,7 +129,7 @@ func (r *QueryLoggingResolver) doCleanUp() {
// Resolve logs the query, duration and the result
func (r *QueryLoggingResolver) Resolve(request *model.Request) (*model.Response, error) {
logger := withPrefix(request.Log, queryLoggingResolverPrefix)
logger := log.WithPrefix(request.Log, queryLoggingResolverPrefix)
start := time.Now()
@ -192,7 +196,7 @@ func (r *QueryLoggingResolver) writeLog() {
// if log channel is > 50% full, this could be a problem with slow writer (external storage over network etc.)
if len(r.logChan) > halfCap {
logger(queryLoggingResolverPrefix).WithField("channel_len",
log.PrefixedLog(queryLoggingResolverPrefix).WithField("channel_len",
len(r.logChan)).Warnf("query log writer is too slow, write duration: %d ms", time.Since(start).Milliseconds())
}
}

View File

@ -124,14 +124,6 @@ type NamedResolver interface {
Name() string
}
// logger is a convenience wrapper that returns the global logger
// tagged with the given prefix.
func logger(prefix string) *logrus.Entry {
	entry := log.PrefixedLog(prefix)

	return entry
}
// withPrefix returns a copy of the given entry tagged with the prefix field.
func withPrefix(logger *logrus.Entry, prefix string) *logrus.Entry {
	entry := logger.WithField("prefix", prefix)

	return entry
}
// Chain creates a chain of resolvers
func Chain(resolvers ...Resolver) Resolver {
for i, res := range resolvers {

View File

@ -5,6 +5,7 @@ import (
"strings"
"github.com/0xERR0R/blocky/config"
"github.com/0xERR0R/blocky/log"
"github.com/0xERR0R/blocky/model"
"github.com/0xERR0R/blocky/util"
@ -60,7 +61,7 @@ func (r *RewriterResolver) Configuration() (result []string) {
// Resolve uses the inner resolver to resolve the rewritten query
func (r *RewriterResolver) Resolve(request *model.Request) (*model.Response, error) {
logger := withPrefix(request.Log, "rewriter_resolver")
logger := log.WithPrefix(request.Log, "rewriter_resolver")
original := request.Req

View File

@ -226,7 +226,7 @@ func (r UpstreamResolver) String() string {
// Resolve calls external resolver
func (r *UpstreamResolver) Resolve(request *model.Request) (response *model.Response, err error) {
logger := withPrefix(request.Log, "upstream_resolver")
logger := log.WithPrefix(request.Log, "upstream_resolver")
ips, err := r.bootstrap.UpstreamIPs(r)
if err != nil {