2020-01-12 18:23:35 +01:00
|
|
|
package resolver
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/csv"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2020-01-17 21:52:41 +01:00
|
|
|
"regexp"
|
2020-01-12 18:23:35 +01:00
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2021-08-25 22:06:34 +02:00
|
|
|
"github.com/0xERR0R/blocky/config"
|
2021-08-29 21:51:24 +02:00
|
|
|
"github.com/0xERR0R/blocky/model"
|
2021-08-25 22:06:34 +02:00
|
|
|
"github.com/0xERR0R/blocky/util"
|
|
|
|
|
2020-01-12 18:23:35 +01:00
|
|
|
"github.com/miekg/dns"
|
|
|
|
"github.com/sirupsen/logrus"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// cleanUpRunPeriod is the interval between two runs of the log file clean up job.
	cleanUpRunPeriod = 12 * time.Hour
	// queryLoggingResolverPrefix is the logger prefix used by this resolver.
	queryLoggingResolverPrefix = "query_logging_resolver"
	// logChanCap is the capacity of the buffered log entry channel; when the
	// channel is full, new entries are dropped instead of blocking resolution.
	logChanCap = 1000
)
|
|
|
|
|
|
|
|
// QueryLoggingResolver writes query information (question, answer, duration, ...) into
// log file or as log entry (if log directory is not configured)
type QueryLoggingResolver struct {
	NextResolver
	logDir           string              // target directory for log files; empty means log via logrus only
	perClient        bool                // if true, write one log file per client, otherwise a single "ALL" file
	logRetentionDays uint64              // delete log files older than this many days; 0 disables clean up
	logChan          chan *queryLogEntry // buffered channel decoupling Resolve from the (potentially slow) file writer
}
|
|
|
|
|
|
|
|
// queryLogEntry is one resolved query queued for the writer goroutine.
type queryLogEntry struct {
	request    *model.Request  // the original client request
	response   *model.Response // the response returned by the next resolver
	start      time.Time       // time at which resolution started
	durationMs int64           // total resolution duration in milliseconds
	logger     *logrus.Entry   // request-scoped logger
}
|
|
|
|
|
2021-02-26 13:45:57 +01:00
|
|
|
// NewQueryLoggingResolver returns a new resolver instance
|
2020-01-12 18:23:35 +01:00
|
|
|
func NewQueryLoggingResolver(cfg config.QueryLogConfig) ChainedResolver {
|
2020-05-24 22:42:43 +02:00
|
|
|
if _, err := os.Stat(cfg.Dir); cfg.Dir != "" && err != nil && os.IsNotExist(err) {
|
2020-01-12 18:23:35 +01:00
|
|
|
logger(queryLoggingResolverPrefix).Fatalf("query log directory '%s' does not exist or is not writable", cfg.Dir)
|
|
|
|
}
|
|
|
|
|
|
|
|
logChan := make(chan *queryLogEntry, logChanCap)
|
|
|
|
|
|
|
|
resolver := QueryLoggingResolver{
|
|
|
|
logDir: cfg.Dir,
|
|
|
|
perClient: cfg.PerClient,
|
|
|
|
logRetentionDays: cfg.LogRetentionDays,
|
|
|
|
logChan: logChan,
|
|
|
|
}
|
|
|
|
|
|
|
|
go resolver.writeLog()
|
|
|
|
|
|
|
|
if cfg.LogRetentionDays > 0 {
|
|
|
|
go resolver.periodicCleanUp()
|
|
|
|
}
|
|
|
|
|
|
|
|
return &resolver
|
|
|
|
}
|
|
|
|
|
|
|
|
// triggers periodically cleanup of old log files
|
|
|
|
func (r *QueryLoggingResolver) periodicCleanUp() {
|
|
|
|
ticker := time.NewTicker(cleanUpRunPeriod)
|
|
|
|
defer ticker.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
<-ticker.C
|
|
|
|
r.doCleanUp()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// deletes old log files
|
|
|
|
func (r *QueryLoggingResolver) doCleanUp() {
|
|
|
|
logger := logger(queryLoggingResolverPrefix)
|
|
|
|
|
|
|
|
logger.Trace("starting clean up")
|
|
|
|
|
|
|
|
files, err := ioutil.ReadDir(r.logDir)
|
|
|
|
if err != nil {
|
|
|
|
logger.WithField("log_dir", r.logDir).Error("can't list log directory: ", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// search for log files, which names starts with date
|
|
|
|
for _, f := range files {
|
|
|
|
if strings.HasSuffix(f.Name(), ".log") && len(f.Name()) > 10 {
|
|
|
|
t, err := time.Parse("2006-01-02", f.Name()[:10])
|
|
|
|
if err == nil {
|
|
|
|
differenceDays := uint64(time.Since(t).Hours() / 24)
|
|
|
|
if r.logRetentionDays > 0 && differenceDays > r.logRetentionDays {
|
|
|
|
logger.WithFields(logrus.Fields{
|
|
|
|
"file": f.Name(),
|
|
|
|
"ageInDays": differenceDays,
|
|
|
|
"logRetentionDays": r.logRetentionDays,
|
|
|
|
}).Info("existing log file is older than retention time and will be deleted")
|
|
|
|
|
|
|
|
err := os.Remove(filepath.Join(r.logDir, f.Name()))
|
2021-01-19 21:52:24 +01:00
|
|
|
util.LogOnErrorWithEntry(logger.WithField("file", f.Name()), "can't remove file: ", err)
|
2020-01-12 18:23:35 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-26 13:45:57 +01:00
|
|
|
// Resolve logs the query, duration and the result
|
2021-08-29 21:51:24 +02:00
|
|
|
func (r *QueryLoggingResolver) Resolve(request *model.Request) (*model.Response, error) {
|
2020-01-12 18:23:35 +01:00
|
|
|
logger := withPrefix(request.Log, queryLoggingResolverPrefix)
|
|
|
|
|
|
|
|
start := time.Now()
|
|
|
|
|
|
|
|
resp, err := r.next.Resolve(request)
|
|
|
|
|
|
|
|
duration := time.Since(start).Milliseconds()
|
|
|
|
|
|
|
|
if err == nil {
|
|
|
|
select {
|
|
|
|
case r.logChan <- &queryLogEntry{
|
|
|
|
request: request,
|
|
|
|
response: resp,
|
|
|
|
start: start,
|
|
|
|
durationMs: duration,
|
|
|
|
logger: logger}:
|
|
|
|
default:
|
|
|
|
logger.Error("query log writer is too slow, log entry will be dropped")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeLog is the consumer loop of logChan: if a log directory is configured it
// appends each entry as a tab-separated row to a per-day (and optionally
// per-client) log file, otherwise it emits the entry via the request-scoped
// logger. Runs until logChan is closed.
func (r *QueryLoggingResolver) writeLog() {
	for logEntry := range r.logChan {
		if r.logDir != "" {
			var clientPrefix string

			// measure the write duration to detect a slow storage backend
			start := time.Now()

			dateString := logEntry.start.Format("2006-01-02")

			if r.perClient {
				clientPrefix = strings.Join(logEntry.request.ClientNames, "-")
			} else {
				clientPrefix = "ALL"
			}

			// file name pattern: <date>_<client>.log; client part sanitized for the file system
			fileName := fmt.Sprintf("%s_%s.log", dateString, escape(clientPrefix))

			writePath := filepath.Join(r.logDir, fileName)

			file, err := os.OpenFile(writePath, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)

			util.LogOnErrorWithEntry(logEntry.logger.WithField("file_name", writePath), "can't create/open file", err)

			if err == nil {
				writer := createCsvWriter(file)

				err := writer.Write(createQueryLogRow(logEntry))
				util.LogOnErrorWithEntry(logEntry.logger.WithField("file_name", writePath), "can't write to file", err)
				writer.Flush()

				_ = file.Close()
			}

			halfCap := cap(r.logChan) / 2

			// if log channel is > 50% full, this could be a problem with slow writer (external storage over network etc.)
			if len(r.logChan) > halfCap {
				logEntry.logger.WithField("channel_len",
					len(r.logChan)).Warnf("query log writer is too slow, write duration: %d ms", time.Since(start).Milliseconds())
			}
		} else {
			logEntry.logger.WithFields(
				logrus.Fields{
					"response_reason": logEntry.response.Reason,
					"response_code":   dns.RcodeToString[logEntry.response.Res.Rcode],
					"answer":          util.AnswerToString(logEntry.response.Res.Answer),
					"duration_ms":     logEntry.durationMs,
				},
			).Infof("query resolved")
		}
	}
}
|
|
|
|
|
2020-01-17 21:52:41 +01:00
|
|
|
// fileNameUnsafeChars matches every run of characters that is not safe to use
// in a file name; compiled once at package level instead of on every call.
var fileNameUnsafeChars = regexp.MustCompile("[^a-zA-Z0-9-_]+")

// escape replaces each run of characters which could be problematic in a file
// name with a single '_'.
func escape(file string) string {
	return fileNameUnsafeChars.ReplaceAllString(file, "_")
}
|
|
|
|
|
2020-01-12 18:23:35 +01:00
|
|
|
func createCsvWriter(file io.Writer) *csv.Writer {
|
|
|
|
writer := csv.NewWriter(file)
|
|
|
|
writer.Comma = '\t'
|
|
|
|
|
|
|
|
return writer
|
|
|
|
}
|
|
|
|
|
|
|
|
func createQueryLogRow(logEntry *queryLogEntry) []string {
|
|
|
|
request := logEntry.request
|
|
|
|
response := logEntry.response
|
|
|
|
|
|
|
|
return []string{
|
|
|
|
logEntry.start.Format("2006-01-02 15:04:05"),
|
|
|
|
request.ClientIP.String(),
|
|
|
|
strings.Join(request.ClientNames, "; "),
|
|
|
|
fmt.Sprintf("%d", logEntry.durationMs),
|
|
|
|
response.Reason,
|
|
|
|
util.QuestionToString(request.Req.Question),
|
|
|
|
util.AnswerToString(response.Res.Answer),
|
|
|
|
dns.RcodeToString[response.Res.Rcode],
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-02-26 13:45:57 +01:00
|
|
|
// Configuration returns the current resolver configuration
|
2020-01-12 18:23:35 +01:00
|
|
|
func (r *QueryLoggingResolver) Configuration() (result []string) {
|
|
|
|
if r.logDir != "" {
|
|
|
|
result = append(result, fmt.Sprintf("logDir= \"%s\"", r.logDir))
|
|
|
|
result = append(result, fmt.Sprintf("perClient = %t", r.perClient))
|
|
|
|
result = append(result, fmt.Sprintf("logRetentionDays= %d", r.logRetentionDays))
|
|
|
|
|
|
|
|
if r.logRetentionDays == 0 {
|
|
|
|
result = append(result, "log cleanup deactivated")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
result = []string{"deactivated"}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|