From 7c7683637381ab907a22a8811aaf223bf05cd5c1 Mon Sep 17 00:00:00 2001
From: ThinkChaos
Date: Fri, 2 Dec 2022 22:21:17 -0500
Subject: [PATCH] feat: stack log prefixes to differentiate between log
 emitters

The goal is to be able to tell apart logs from different sources, such
as `bootstrap.parallel_best_resolver` and `parallel_best_resolver`.
---
 log/logger.go                             | 14 +++++++++++++-
 resolver/blocking_resolver.go             |  4 ++--
 resolver/caching_resolver.go              |  7 ++++---
 resolver/client_names_resolver.go         |  3 ++-
 resolver/conditional_upstream_resolver.go |  5 +++--
 resolver/custom_dns_resolver.go           |  5 +++--
 resolver/hosts_file_resolver.go           |  7 ++++---
 resolver/parallel_best_resolver.go        |  5 +++--
 resolver/query_logging_resolver.go        | 14 +++++++++-----
 resolver/resolver.go                      |  8 --------
 resolver/rewriter_resolver.go             |  3 ++-
 resolver/upstream_resolver.go             |  2 +-
 12 files changed, 46 insertions(+), 31 deletions(-)

diff --git a/log/logger.go b/log/logger.go
index c399a09f..53d27839 100644
--- a/log/logger.go
+++ b/log/logger.go
@@ -3,6 +3,7 @@ package log
 //go:generate go run github.com/abice/go-enum -f=$GOFILE --marshal --names
 
 import (
+	"fmt"
 	"io"
 	"strings"
 
@@ -10,6 +11,8 @@ import (
 	prefixed "github.com/x-cray/logrus-prefixed-formatter"
 )
 
+const prefixField = "prefix"
+
 // Logger is the global logging instance
 //
 //nolint:gochecknoglobals
@@ -60,7 +63,16 @@ func Log() *logrus.Logger {
 
 // PrefixedLog return the global logger with prefix
 func PrefixedLog(prefix string) *logrus.Entry {
-	return logger.WithField("prefix", prefix)
+	return logger.WithField(prefixField, prefix)
+}
+
+// WithPrefix adds the given prefix to the logger.
+func WithPrefix(logger *logrus.Entry, prefix string) *logrus.Entry {
+	if existingPrefix, ok := logger.Data[prefixField]; ok {
+		prefix = fmt.Sprintf("%s.%s", existingPrefix, prefix)
+	}
+
+	return logger.WithField(prefixField, prefix)
 }
 
 // EscapeInput removes line breaks from input
diff --git a/resolver/blocking_resolver.go b/resolver/blocking_resolver.go
index 7f903fb1..15b63fd0 100644
--- a/resolver/blocking_resolver.go
+++ b/resolver/blocking_resolver.go
@@ -164,7 +164,7 @@ func createDownloader(cfg config.BlockingConfig, bootstrap *Bootstrap) *lists.HT
 }
 
 func setupRedisEnabledSubscriber(c *BlockingResolver) {
-	logger := logger("blocking_resolver")
+	logger := log.PrefixedLog("blocking_resolver")
 
 	go func() {
 		for em := range c.redisClient.EnabledChannel {
@@ -410,7 +410,7 @@ func (r *BlockingResolver) handleBlacklist(groupsToCheck []string,
 
 // Resolve checks the query against the blacklist and delegates to next resolver if domain is not blocked
 func (r *BlockingResolver) Resolve(request *model.Request) (*model.Response, error) {
-	logger := withPrefix(request.Log, "blacklist_resolver")
+	logger := log.WithPrefix(request.Log, "blacklist_resolver")
 
 	groupsToCheck := r.groupsToCheckForClient(request)
 	if len(groupsToCheck) > 0 {
diff --git a/resolver/caching_resolver.go b/resolver/caching_resolver.go
index 9ae5dc5c..8a2eacd3 100644
--- a/resolver/caching_resolver.go
+++ b/resolver/caching_resolver.go
@@ -10,6 +10,7 @@ import (
 	"github.com/0xERR0R/blocky/cache/expirationcache"
 	"github.com/0xERR0R/blocky/config"
 	"github.com/0xERR0R/blocky/evt"
+	"github.com/0xERR0R/blocky/log"
 	"github.com/0xERR0R/blocky/model"
 	"github.com/0xERR0R/blocky/redis"
 	"github.com/0xERR0R/blocky/util"
@@ -79,7 +80,7 @@ func configureCaches(c *CachingResolver, cfg *config.CachingConfig) {
 }
 
 func setupRedisCacheSubscriber(c *CachingResolver) {
-	logger := logger("caching_resolver")
+	logger := log.PrefixedLog("caching_resolver")
 
 	go func() {
 		for rc := range c.redisClient.CacheChannel {
@@ -101,7 +102,7 @@ func (r *CachingResolver) isPrefetchingDomain(cacheKey string) bool {
 
 func (r *CachingResolver) onExpired(cacheKey string) (val interface{}, ttl time.Duration) {
 	qType, domainName := util.ExtractCacheKey(cacheKey)
-	logger := logger("caching_resolver")
+	logger := log.PrefixedLog("caching_resolver")
 
 	if r.isPrefetchingDomain(cacheKey) {
 		logger.Debugf("prefetching '%s' (%s)", util.Obfuscate(domainName), qType.String())
@@ -151,7 +152,7 @@ func (r *CachingResolver) Configuration() (result []string) {
 // Resolve checks if the current query result is already in the cache and returns it
 // or delegates to the next resolver
 func (r *CachingResolver) Resolve(request *model.Request) (response *model.Response, err error) {
-	logger := withPrefix(request.Log, "caching_resolver")
+	logger := log.WithPrefix(request.Log, "caching_resolver")
 
 	if r.maxCacheTimeSec < 0 {
 		logger.Debug("skip cache")
diff --git a/resolver/client_names_resolver.go b/resolver/client_names_resolver.go
index abd25b0e..b5bcd2ae 100644
--- a/resolver/client_names_resolver.go
+++ b/resolver/client_names_resolver.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/0xERR0R/blocky/cache/expirationcache"
 	"github.com/0xERR0R/blocky/config"
+	"github.com/0xERR0R/blocky/log"
 	"github.com/0xERR0R/blocky/model"
 	"github.com/0xERR0R/blocky/util"
 
@@ -101,7 +102,7 @@ func (r *ClientNamesResolver) getClientNames(request *model.Request) []string {
 		}
 	}
 
-	names := r.resolveClientNames(ip, withPrefix(request.Log, "client_names_resolver"))
+	names := r.resolveClientNames(ip, log.WithPrefix(request.Log, "client_names_resolver"))
 	r.cache.Put(ip.String(), names, time.Hour)
 
 	return names
diff --git a/resolver/conditional_upstream_resolver.go b/resolver/conditional_upstream_resolver.go
index 5b722047..bf1492aa 100644
--- a/resolver/conditional_upstream_resolver.go
+++ b/resolver/conditional_upstream_resolver.go
@@ -5,6 +5,7 @@ import (
 	"strings"
 
 	"github.com/0xERR0R/blocky/config"
+	"github.com/0xERR0R/blocky/log"
 	"github.com/0xERR0R/blocky/model"
 	"github.com/0xERR0R/blocky/util"
 
@@ -82,7 +83,7 @@ func (r *ConditionalUpstreamResolver) processRequest(request *model.Request) (bo
 
 // Resolve uses the conditional resolver to resolve the query
 func (r *ConditionalUpstreamResolver) Resolve(request *model.Request) (*model.Response, error) {
-	logger := withPrefix(request.Log, "conditional_resolver")
+	logger := log.WithPrefix(request.Log, "conditional_resolver")
 
 	if len(r.mapping) > 0 {
 		resolved, resp, err := r.processRequest(request)
@@ -100,7 +101,7 @@ func (r *ConditionalUpstreamResolver) internalResolve(reso Resolver, doFQ, do st
 	req *model.Request,
 ) (*model.Response, error) {
 	// internal request resolution
-	logger := withPrefix(req.Log, "conditional_resolver")
+	logger := log.WithPrefix(req.Log, "conditional_resolver")
 
 	req.Req.Question[0].Name = dns.Fqdn(doFQ)
 	response, err := reso.Resolve(req)
diff --git a/resolver/custom_dns_resolver.go b/resolver/custom_dns_resolver.go
index 7feb26e2..333a137d 100644
--- a/resolver/custom_dns_resolver.go
+++ b/resolver/custom_dns_resolver.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/0xERR0R/blocky/config"
+	"github.com/0xERR0R/blocky/log"
 	"github.com/0xERR0R/blocky/model"
 	"github.com/0xERR0R/blocky/util"
 
@@ -89,7 +90,7 @@ func (r *CustomDNSResolver) handleReverseDNS(request *model.Request) *model.Resp
 }
 
 func (r *CustomDNSResolver) processRequest(request *model.Request) *model.Response {
-	logger := withPrefix(request.Log, "custom_dns_resolver")
"custom_dns_resolver") + logger := log.WithPrefix(request.Log, "custom_dns_resolver") response := new(dns.Msg) response.SetReply(request.Req) @@ -138,7 +139,7 @@ func (r *CustomDNSResolver) processRequest(request *model.Request) *model.Respon // Resolve uses internal mapping to resolve the query func (r *CustomDNSResolver) Resolve(request *model.Request) (*model.Response, error) { - logger := withPrefix(request.Log, "custom_dns_resolver") + logger := log.WithPrefix(request.Log, "custom_dns_resolver") reverseResp := r.handleReverseDNS(request) if reverseResp != nil { diff --git a/resolver/hosts_file_resolver.go b/resolver/hosts_file_resolver.go index bb9f7983..d8c3772d 100644 --- a/resolver/hosts_file_resolver.go +++ b/resolver/hosts_file_resolver.go @@ -8,6 +8,7 @@ import ( "time" "github.com/0xERR0R/blocky/config" + "github.com/0xERR0R/blocky/log" "github.com/0xERR0R/blocky/model" "github.com/0xERR0R/blocky/util" "github.com/miekg/dns" @@ -64,7 +65,7 @@ func (r *HostsFileResolver) handleReverseDNS(request *model.Request) *model.Resp } func (r *HostsFileResolver) Resolve(request *model.Request) (*model.Response, error) { - logger := withPrefix(request.Log, hostsFileResolverLogger) + logger := log.WithPrefix(request.Log, hostsFileResolverLogger) if r.HostsFilePath == "" { return r.next.Resolve(request) @@ -143,7 +144,7 @@ func NewHostsFileResolver(cfg config.HostsFileConfig) *HostsFileResolver { } if err := r.parseHostsFile(); err != nil { - logger := logger(hostsFileResolverLogger) + logger := log.PrefixedLog(hostsFileResolverLogger) logger.Warnf("cannot parse hosts file: %s, hosts file resolving is disabled", r.HostsFilePath) r.HostsFilePath = "" } else { @@ -234,7 +235,7 @@ func (r *HostsFileResolver) periodicUpdate() { for { <-ticker.C - logger := logger(hostsFileResolverLogger) + logger := log.PrefixedLog(hostsFileResolverLogger) logger.WithField("file", r.HostsFilePath).Debug("refreshing hosts file") util.LogOnError("can't refresh hosts file: ", r.parseHostsFile()) diff --git a/resolver/parallel_best_resolver.go b/resolver/parallel_best_resolver.go index 9866addf..60fc0df9 100644 --- a/resolver/parallel_best_resolver.go +++ b/resolver/parallel_best_resolver.go @@ -10,6 +10,7 @@ import ( "time" "github.com/0xERR0R/blocky/config" + "github.com/0xERR0R/blocky/log" "github.com/0xERR0R/blocky/model" "github.com/0xERR0R/blocky/util" "github.com/miekg/dns" @@ -78,7 +79,7 @@ func testResolver(r *UpstreamResolver) error { func NewParallelBestResolver( upstreamResolvers map[string][]config.Upstream, bootstrap *Bootstrap, shouldVerifyUpstreams bool, ) (Resolver, error) { - logger := logger(parallelResolverLogger) + logger := log.PrefixedLog(parallelResolverLogger) resolverGroups := make(map[string][]Resolver, len(upstreamResolvers)) @@ -205,7 +206,7 @@ func (r *ParallelBestResolver) resolversForClient(request *model.Request) (resul // Resolve sends the query request to multiple upstream resolvers and returns the fastest result func (r *ParallelBestResolver) Resolve(request *model.Request) (*model.Response, error) { - logger := request.Log.WithField("prefix", parallelResolverLogger) + logger := log.WithPrefix(request.Log, parallelResolverLogger) resolvers := r.resolversForClient(request) diff --git a/resolver/query_logging_resolver.go b/resolver/query_logging_resolver.go index 862032dc..a06d5694 100644 --- a/resolver/query_logging_resolver.go +++ b/resolver/query_logging_resolver.go @@ -5,6 +5,7 @@ import ( "time" "github.com/0xERR0R/blocky/config" + "github.com/0xERR0R/blocky/log" 
"github.com/0xERR0R/blocky/model" "github.com/0xERR0R/blocky/querylog" "github.com/0xERR0R/blocky/util" @@ -32,6 +33,8 @@ type QueryLoggingResolver struct { // NewQueryLoggingResolver returns a new resolver instance func NewQueryLoggingResolver(cfg config.QueryLogConfig) ChainedResolver { + logger := log.PrefixedLog(queryLoggingResolverPrefix) + var writer querylog.Writer logType := cfg.Type @@ -60,11 +63,12 @@ func NewQueryLoggingResolver(cfg config.QueryLogConfig) ChainedResolver { retry.DelayType(retry.FixedDelay), retry.Delay(time.Duration(cfg.CreationCooldown)), retry.OnRetry(func(n uint, err error) { - logger(queryLoggingResolverPrefix).Warnf("Error occurred on query writer creation, "+ - "retry attempt %d/%d: %v", n+1, cfg.CreationAttempts, err) + logger.Warnf( + "Error occurred on query writer creation, retry attempt %d/%d: %v", n+1, cfg.CreationAttempts, err, + ) })) if err != nil { - logger(queryLoggingResolverPrefix).Error("can't create query log writer, using console as fallback: ", err) + logger.Error("can't create query log writer, using console as fallback: ", err) writer = querylog.NewLoggerWriter() logType = config.QueryLogTypeConsole @@ -125,7 +129,7 @@ func (r *QueryLoggingResolver) doCleanUp() { // Resolve logs the query, duration and the result func (r *QueryLoggingResolver) Resolve(request *model.Request) (*model.Response, error) { - logger := withPrefix(request.Log, queryLoggingResolverPrefix) + logger := log.WithPrefix(request.Log, queryLoggingResolverPrefix) start := time.Now() @@ -192,7 +196,7 @@ func (r *QueryLoggingResolver) writeLog() { // if log channel is > 50% full, this could be a problem with slow writer (external storage over network etc.) if len(r.logChan) > halfCap { - logger(queryLoggingResolverPrefix).WithField("channel_len", + log.PrefixedLog(queryLoggingResolverPrefix).WithField("channel_len", len(r.logChan)).Warnf("query log writer is too slow, write duration: %d ms", time.Since(start).Milliseconds()) } } diff --git a/resolver/resolver.go b/resolver/resolver.go index 075f9757..f3f58cb8 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -124,14 +124,6 @@ type NamedResolver interface { Name() string } -func logger(prefix string) *logrus.Entry { - return log.PrefixedLog(prefix) -} - -func withPrefix(logger *logrus.Entry, prefix string) *logrus.Entry { - return logger.WithField("prefix", prefix) -} - // Chain creates a chain of resolvers func Chain(resolvers ...Resolver) Resolver { for i, res := range resolvers { diff --git a/resolver/rewriter_resolver.go b/resolver/rewriter_resolver.go index 57f37b5c..d8cabfd2 100644 --- a/resolver/rewriter_resolver.go +++ b/resolver/rewriter_resolver.go @@ -5,6 +5,7 @@ import ( "strings" "github.com/0xERR0R/blocky/config" + "github.com/0xERR0R/blocky/log" "github.com/0xERR0R/blocky/model" "github.com/0xERR0R/blocky/util" @@ -60,7 +61,7 @@ func (r *RewriterResolver) Configuration() (result []string) { // Resolve uses the inner resolver to resolve the rewritten query func (r *RewriterResolver) Resolve(request *model.Request) (*model.Response, error) { - logger := withPrefix(request.Log, "rewriter_resolver") + logger := log.WithPrefix(request.Log, "rewriter_resolver") original := request.Req diff --git a/resolver/upstream_resolver.go b/resolver/upstream_resolver.go index 6ca53f71..3f37495a 100644 --- a/resolver/upstream_resolver.go +++ b/resolver/upstream_resolver.go @@ -226,7 +226,7 @@ func (r UpstreamResolver) String() string { // Resolve calls external resolver func (r *UpstreamResolver) Resolve(request 
-	logger := withPrefix(request.Log, "upstream_resolver")
+	logger := log.WithPrefix(request.Log, "upstream_resolver")
 
 	ips, err := r.bootstrap.UpstreamIPs(r)
 	if err != nil {
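
Note (illustration only, not part of the patch): a minimal sketch of how the new log.WithPrefix stacks prefixes onto a request-scoped logger. PrefixedLog, WithPrefix and Log come from log/logger.go above; the "bootstrap" prefix value is taken from the commit message, and the "client" field is an assumption made up for this example.

	package main

	import "github.com/0xERR0R/blocky/log"

	func main() {
		// PrefixedLog sets the "prefix" field on an entry of the global logger.
		bootstrapLogger := log.PrefixedLog("bootstrap")

		// WithPrefix appends to an existing prefix instead of replacing it,
		// so nested emitters stay distinguishable.
		nested := log.WithPrefix(bootstrapLogger, "parallel_best_resolver")
		nested.Info("emitted with prefix bootstrap.parallel_best_resolver")

		// Without an existing prefix, the field is simply set.
		plain := log.WithPrefix(log.Log().WithField("client", "example"), "parallel_best_resolver")
		plain.Info("emitted with prefix parallel_best_resolver")
	}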