blocky/resolver/query_logging_resolver.go

package resolver

import (
	"fmt"
	"time"

	"github.com/0xERR0R/blocky/config"
	"github.com/0xERR0R/blocky/model"
	"github.com/0xERR0R/blocky/querylog"
)
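
// Tuning values for the asynchronous query log: cleanUpRunPeriod controls how
// often outdated entries are purged, logChanCap bounds the buffered write queue.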
const (
	cleanUpRunPeriod           = 12 * time.Hour
	queryLoggingResolverPrefix = "query_logging_resolver"
	logChanCap                 = 1000
)

// QueryLoggingResolver writes query information (question, answer, duration, ...)
type QueryLoggingResolver struct {
	NextResolver
	target           string
	logRetentionDays uint64
	logChan          chan *querylog.Entry
	writer           querylog.Writer
}

// NewQueryLoggingResolver returns a new resolver instance
func NewQueryLoggingResolver(cfg config.QueryLogConfig) ChainedResolver {
	var writer querylog.Writer

	switch cfg.Type {
	case config.QueryLogTypeCsv:
		writer = querylog.NewCSVWriter(cfg.Target, false, cfg.LogRetentionDays)
	case config.QueryLogTypeCsvClient:
		writer = querylog.NewCSVWriter(cfg.Target, true, cfg.LogRetentionDays)
	case config.QueryLogTypeMysql:
		writer = querylog.NewDatabaseWriter(cfg.Target, cfg.LogRetentionDays, 30*time.Second)
	case config.QueryLogTypeConsole:
		writer = querylog.NewLoggerWriter()
	case config.QueryLogTypeNone:
		writer = querylog.NewNoneWriter()
	}
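
	// Buffered channel decouples request handling from (potentially slow) log writing.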
	logChan := make(chan *querylog.Entry, logChanCap)

	resolver := QueryLoggingResolver{
		target:           cfg.Target,
		logRetentionDays: cfg.LogRetentionDays,
		logChan:          logChan,
		writer:           writer,
	}
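
	// Consume log entries in the background; periodic cleanup only runs when
	// a retention period is configured.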
	go resolver.writeLog()

	if cfg.LogRetentionDays > 0 {
		go resolver.periodicCleanUp()
	}

	return &resolver
}
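
// A minimal usage sketch (illustrative values only; the types and fields are
// the ones this file already uses):
//
//	cfg := config.QueryLogConfig{
//		Type:             config.QueryLogTypeCsv,
//		Target:           "/var/log/blocky", // hypothetical path
//		LogRetentionDays: 7,
//	}
//	queryLog := NewQueryLoggingResolver(cfg)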

// periodicCleanUp triggers the cleanup of old log entries at a fixed interval
func (r *QueryLoggingResolver) periodicCleanUp() {
	ticker := time.NewTicker(cleanUpRunPeriod)
	defer ticker.Stop()

	for {
		<-ticker.C
		r.doCleanUp()
	}
}
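
// doCleanUp delegates the removal of outdated log entries to the configured writer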
func (r *QueryLoggingResolver) doCleanUp() {
	r.writer.CleanUp()
}

// Resolve logs the query, duration and the result
func (r *QueryLoggingResolver) Resolve(request *model.Request) (*model.Response, error) {
	logger := withPrefix(request.Log, queryLoggingResolverPrefix)

	start := time.Now()

	resp, err := r.next.Resolve(request)

	duration := time.Since(start).Milliseconds()
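
	// Only successful responses are logged; the non-blocking send drops the
	// entry instead of stalling resolution when the channel is full.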
	if err == nil {
		select {
		case r.logChan <- &querylog.Entry{
			Request:    request,
			Response:   resp,
			Start:      start,
			DurationMs: duration,
		}:
		default:
			logger.Error("query log writer is too slow, log entry will be dropped")
		}
	}

	return resp, err
}

// writeLog consumes entries from the channel and persists them via the
// configured writer, warning if the writer cannot keep up
func (r *QueryLoggingResolver) writeLog() {
	for logEntry := range r.logChan {
		start := time.Now()

		r.writer.Write(logEntry)

		halfCap := cap(r.logChan) / 2

		// if the channel is more than 50% full, the writer is probably too slow
		// (e.g. external storage over a slow network)
		if len(r.logChan) > halfCap {
			logger(queryLoggingResolverPrefix).WithField("channel_len",
				len(r.logChan)).Warnf("query log writer is too slow, write duration: %d ms", time.Since(start).Milliseconds())
		}
	}
}

// Configuration returns the current resolver configuration
func (r *QueryLoggingResolver) Configuration() (result []string) {
	if r.target != "" {
		result = append(result, fmt.Sprintf("target: \"%s\"", r.target))
		result = append(result, fmt.Sprintf("logRetentionDays: %d", r.logRetentionDays))

		if r.logRetentionDays == 0 {
			result = append(result, "log cleanup deactivated")
		}
	} else {
		result = []string{"deactivated"}
	}

	return
}