feat: new parameter "StartStrategy" (#566) (#636)

Blocky should start resolving DNS traffic as soon as possible
This commit is contained in:
Kwitsch 2022-09-03 22:12:07 +02:00 committed by GitHub
parent 8180858efc
commit fd93f67899
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 288 additions and 89 deletions

View File

@ -46,6 +46,13 @@ type NetProtocol uint16
// )
type QueryLogType int16
// StartStrategyType startup strategy ENUM(
// blocking // synchronously download blocking lists on startup
// failOnError // synchronously download blocking lists on startup and shutdown on error
// fast // asynchronously download blocking lists on startup
// )
type StartStrategyType uint16
type QType dns.Type
func (c QType) String() string {
@ -480,17 +487,19 @@ type ConditionalUpstreamMapping struct {
// BlockingConfig configuration for query blocking
type BlockingConfig struct {
BlackLists map[string][]string `yaml:"blackLists"`
WhiteLists map[string][]string `yaml:"whiteLists"`
ClientGroupsBlock map[string][]string `yaml:"clientGroupsBlock"`
BlockType string `yaml:"blockType" default:"ZEROIP"`
BlockTTL Duration `yaml:"blockTTL" default:"6h"`
DownloadTimeout Duration `yaml:"downloadTimeout" default:"60s"`
DownloadAttempts uint `yaml:"downloadAttempts" default:"3"`
DownloadCooldown Duration `yaml:"downloadCooldown" default:"1s"`
RefreshPeriod Duration `yaml:"refreshPeriod" default:"4h"`
FailStartOnListError bool `yaml:"failStartOnListError" default:"false"`
ProcessingConcurrency uint `yaml:"processingConcurrency" default:"4"`
BlackLists map[string][]string `yaml:"blackLists"`
WhiteLists map[string][]string `yaml:"whiteLists"`
ClientGroupsBlock map[string][]string `yaml:"clientGroupsBlock"`
BlockType string `yaml:"blockType" default:"ZEROIP"`
BlockTTL Duration `yaml:"blockTTL" default:"6h"`
DownloadTimeout Duration `yaml:"downloadTimeout" default:"60s"`
DownloadAttempts uint `yaml:"downloadAttempts" default:"3"`
DownloadCooldown Duration `yaml:"downloadCooldown" default:"1s"`
RefreshPeriod Duration `yaml:"refreshPeriod" default:"4h"`
// Deprecated
FailStartOnListError bool `yaml:"failStartOnListError" default:"false"`
ProcessingConcurrency uint `yaml:"processingConcurrency" default:"4"`
StartStrategy StartStrategyType `yaml:"startStrategy" default:"blocking"`
}
// ClientLookupConfig configuration for the client lookup
@ -641,6 +650,17 @@ func validateConfig(cfg *Config) {
cfg.Filtering.QueryTypes.Insert(dns.Type(dns.TypeAAAA))
}
if cfg.Blocking.FailStartOnListError {
log.Log().Warnf("'blocking.failStartOnListError' is deprecated. Please use 'blocking.startStrategy'" +
" with 'failOnError' instead.")
if cfg.Blocking.StartStrategy == StartStrategyTypeBlocking {
cfg.Blocking.StartStrategy = StartStrategyTypeFailOnError
} else if cfg.Blocking.StartStrategy == StartStrategyTypeFast {
log.Log().Warnf("'blocking.startStrategy' with 'fast' will ignore 'blocking.failStartOnListError'.")
}
}
}
// GetConfig returns the current config

View File

@ -170,3 +170,74 @@ func (x *QueryLogType) UnmarshalText(text []byte) error {
*x = tmp
return nil
}
const (
	// StartStrategyTypeBlocking is a StartStrategyType of type Blocking.
	// synchronously download blocking lists on startup
	StartStrategyTypeBlocking StartStrategyType = iota
	// StartStrategyTypeFailOnError is a StartStrategyType of type FailOnError.
	// synchronously download blocking lists on startup and shutdown on error
	StartStrategyTypeFailOnError
	// StartStrategyTypeFast is a StartStrategyType of type Fast.
	// asynchronously download blocking lists on startup
	StartStrategyTypeFast
)
// _StartStrategyTypeName concatenates all value names into one string;
// the slices below index into it so each name shares the same backing storage.
const _StartStrategyTypeName = "blockingfailOnErrorfast"

// _StartStrategyTypeNames holds the string form of each enum value, in declaration order.
var _StartStrategyTypeNames = []string{
	_StartStrategyTypeName[0:8],   // "blocking"
	_StartStrategyTypeName[8:19],  // "failOnError"
	_StartStrategyTypeName[19:23], // "fast"
}
// StartStrategyTypeNames returns a list of possible string values of StartStrategyType.
// The returned slice is a fresh copy, so callers may modify it freely.
func StartStrategyTypeNames() []string {
	names := append([]string(nil), _StartStrategyTypeNames...)

	return names
}
// _StartStrategyTypeMap maps each enum value to its string representation,
// used by String.
var _StartStrategyTypeMap = map[StartStrategyType]string{
	StartStrategyTypeBlocking:    _StartStrategyTypeName[0:8],
	StartStrategyTypeFailOnError: _StartStrategyTypeName[8:19],
	StartStrategyTypeFast:        _StartStrategyTypeName[19:23],
}
// String implements the Stringer interface. Unknown values are rendered
// as "StartStrategyType(<numeric>)".
func (x StartStrategyType) String() string {
	str, ok := _StartStrategyTypeMap[x]
	if !ok {
		return fmt.Sprintf("StartStrategyType(%d)", x)
	}

	return str
}
// _StartStrategyTypeValue maps each string representation back to its enum
// value, used by ParseStartStrategyType.
var _StartStrategyTypeValue = map[string]StartStrategyType{
	_StartStrategyTypeName[0:8]:   StartStrategyTypeBlocking,
	_StartStrategyTypeName[8:19]:  StartStrategyTypeFailOnError,
	_StartStrategyTypeName[19:23]: StartStrategyTypeFast,
}
// ParseStartStrategyType attempts to convert a string to a StartStrategyType.
// For an unknown name it returns the zero value and an error listing all
// valid names.
func ParseStartStrategyType(name string) (StartStrategyType, error) {
	strategy, ok := _StartStrategyTypeValue[name]
	if ok {
		return strategy, nil
	}

	return StartStrategyType(0), fmt.Errorf(
		"%s is not a valid StartStrategyType, try [%s]",
		name, strings.Join(_StartStrategyTypeNames, ", "))
}
// MarshalText implements the text marshaller method. It never fails; the
// textual form is whatever String returns.
func (x StartStrategyType) MarshalText() ([]byte, error) {
	name := x.String()

	return []byte(name), nil
}
// UnmarshalText implements the text unmarshaller method. On parse failure
// the receiver is left unchanged and the parse error is returned.
func (x *StartStrategyType) UnmarshalText(text []byte) error {
	parsed, err := ParseStartStrategyType(string(text))
	if err != nil {
		return err
	}

	*x = parsed

	return nil
}

View File

@ -179,6 +179,29 @@ bootstrapDns:
})
})
// Covers validateConfig's migration of the deprecated
// 'failStartOnListError' flag onto the new 'startStrategy' parameter.
When("Deprecated parameter 'failStartOnListError' is set", func() {
	var (
		c Config
	)
	BeforeEach(func() {
		c = Config{
			Blocking: BlockingConfig{
				FailStartOnListError: true,
				StartStrategy:        StartStrategyTypeBlocking,
			},
		}
	})
	It("should change StartStrategy blocking to failOnError", func() {
		validateConfig(&c)
		Expect(c.Blocking.StartStrategy).Should(Equal(StartStrategyTypeFailOnError))
	})
	// an explicit 'fast' strategy takes precedence over the deprecated flag
	It("shouldn't change StartStrategy if set to fast", func() {
		c.Blocking.StartStrategy = StartStrategyTypeFast
		validateConfig(&c)
		Expect(c.Blocking.StartStrategy).Should(Equal(StartStrategyTypeFast))
	})
})
When("config directory does not exist", func() {
It("should return error", func() {
_, err = LoadConfig(tmpDir.JoinPath("config.yml"), true)

View File

@ -104,8 +104,8 @@ blocking:
downloadAttempts: 5
# optional: Time between the download attempts. Default: 1s
downloadCooldown: 10s
# optional: if true, application startup will fail if at least one list can't be downloaded / opened. Default: false
failStartOnListError: false
# optional: if failOnError, application startup will fail if at least one list can't be downloaded / opened. Default: blocking
startStrategy: failOnError
# optional: configuration for caching of DNS responses
caching:

View File

@ -209,7 +209,8 @@ hostname belongs to which IP address, all DNS queries for the local network shou
The optional parameter `rewrite` behaves the same as with custom DNS.
The optional parameter fallbackUpstream, if false (default), return empty result if after rewrite, the mapped resolver returned an empty answer. If true, the original query will be sent to the upstream resolver.
# Usage: One usecase when having split DNS for internal and external (internet facing) users, but not all subdomains are listed in the internal domain.
### Usage: One usecase when having split DNS for internal and external (internet facing) users, but not all subdomains are listed in the internal domain
!!! example
@ -456,16 +457,22 @@ You can configure the list download attempts according to your internet connecti
downloadCooldown: 10s
```
### Fail on start
### Start strategy
You can ensure with parameter `failStartOnListError = true` that the application will fail if at least one list can't be
downloaded or opened. Default value is `false`.
You can configure the blocking behavior during application start of blocky.
If no strategy is selected, `blocking` will be used.
| startStrategy | Description                                                                                           |
|---------------|-------------------------------------------------------------------------------------------------------|
| blocking      | all blocking lists will be loaded before DNS resolution starts                                        |
| failOnError   | like blocking, but blocky shuts down if a download fails                                              |
| fast          | DNS resolution starts immediately without blocking, which will be enabled after list load is completed |
!!! example
```yaml
blocking:
failStartOnListError: false
startStrategy: failOnError
```
### Concurrency
@ -474,7 +481,7 @@ Blocky downloads and processes links in a single group concurrently. With parame
how many links can be processed in the same time. Higher value can reduce the overall list refresh time, but more parallel
download and processing jobs need more RAM. Please consider to reduce this value on systems with limited memory. Default value is 4.
!!! example
!!! example
```yaml
blocking:

View File

@ -95,7 +95,7 @@ func (b *ListCache) Configuration() (result []string) {
// NewListCache creates new list instance
func NewListCache(t ListCacheType, groupToLinks map[string][]string, refreshPeriod time.Duration,
downloader FileDownloader, processingConcurrency uint) (*ListCache, error) {
downloader FileDownloader, processingConcurrency uint, async bool) (*ListCache, error) {
groupCaches := make(map[string]stringcache.StringCache)
if processingConcurrency == 0 {
@ -110,7 +110,13 @@ func NewListCache(t ListCacheType, groupToLinks map[string][]string, refreshPeri
listType: t,
processingConcurrency: processingConcurrency,
}
initError := b.refresh(true)
var initError error
if async {
initError = nil
} else {
initError = b.refresh(true)
}
if initError == nil {
go periodicUpdate(b)

View File

@ -12,7 +12,7 @@ func BenchmarkRefresh(b *testing.B) {
"gr1": {file1, file2, file3},
}
cache, _ := NewListCache(ListCacheTypeBlacklist, lists, -1, NewDownloader(), 5)
cache, _ := NewListCache(ListCacheTypeBlacklist, lists, -1, NewDownloader(), 5, false)
b.ReportAllocs()

View File

@ -21,36 +21,39 @@ import (
var _ = Describe("ListCache", func() {
var (
emptyFile, file1, file2, file3 *os.File
tmpDir *TmpFolder
emptyFile, file1, file2, file3 *TmpFile
server1, server2, server3 *httptest.Server
)
BeforeEach(func() {
emptyFile = TempFile("#empty file\n\n")
server1 = TestServer("blocked1.com\nblocked1a.com\n192.168.178.55")
server2 = TestServer("blocked2.com")
server3 = TestServer("blocked3.com\nblocked1a.com")
tmpDir = NewTmpFolder("ListCache")
Expect(tmpDir.Error).Should(Succeed())
DeferCleanup(tmpDir.Clean)
file1 = TempFile("blocked1.com\nblocked1a.com")
file2 = TempFile("blocked2.com")
file3 = TempFile("blocked3.com\nblocked1a.com")
})
AfterEach(func() {
_ = os.Remove(emptyFile.Name())
_ = os.Remove(file1.Name())
_ = os.Remove(file2.Name())
_ = os.Remove(file3.Name())
server1.Close()
server2.Close()
server3.Close()
server1 = TestServer("blocked1.com\nblocked1a.com\n192.168.178.55")
DeferCleanup(server1.Close)
server2 = TestServer("blocked2.com")
DeferCleanup(server2.Close)
server3 = TestServer("blocked3.com\nblocked1a.com")
DeferCleanup(server3.Close)
emptyFile = tmpDir.CreateStringFile("empty", "#empty file")
Expect(emptyFile.Error).Should(Succeed())
file1 = tmpDir.CreateStringFile("file1", "blocked1.com", "blocked1a.com")
Expect(file1.Error).Should(Succeed())
file2 = tmpDir.CreateStringFile("file2", "blocked2.com")
Expect(file2.Error).Should(Succeed())
file3 = tmpDir.CreateStringFile("file3", "blocked3.com", "blocked1a.com")
Expect(file3.Error).Should(Succeed())
})
Describe("List cache and matching", func() {
When("Query with empty", func() {
It("should not panic", func() {
lists := map[string][]string{
"gr0": {emptyFile.Name()},
"gr0": {emptyFile.Path},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
found, group := sut.Match("", []string{"gr0"})
@ -62,9 +65,9 @@ var _ = Describe("ListCache", func() {
When("List is empty", func() {
It("should not match anything", func() {
lists := map[string][]string{
"gr1": {emptyFile.Name()},
"gr1": {emptyFile.Path},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
found, group := sut.Match("google.com", []string{"gr1"})
@ -99,6 +102,7 @@ var _ = Describe("ListCache", func() {
4*time.Hour,
mockDownloader,
defaultProcessingConcurrency,
false,
)
Expect(err).Should(Succeed())
@ -144,7 +148,8 @@ var _ = Describe("ListCache", func() {
"gr1": {"http://dummy"},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, mockDownloader, defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, mockDownloader,
defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
By("Lists loaded without err", func() {
@ -174,7 +179,7 @@ var _ = Describe("ListCache", func() {
"gr2": {server3.URL},
}
sut, _ := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, _ := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency, false)
found, group := sut.Match("blocked1.com", []string{"gr1", "gr2"})
Expect(found).Should(BeTrue())
@ -196,7 +201,7 @@ var _ = Describe("ListCache", func() {
"gr2": {server3.URL, "someotherfile"},
}
sut, _ := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, _ := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency, false)
found, group := sut.Match("blocked1.com", []string{"gr1", "gr2"})
Expect(found).Should(BeTrue())
@ -223,7 +228,7 @@ var _ = Describe("ListCache", func() {
resultCnt = cnt
})
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
found, group := sut.Match("blocked1.com", []string{})
@ -235,11 +240,11 @@ var _ = Describe("ListCache", func() {
When("multiple groups are passed", func() {
It("should match", func() {
lists := map[string][]string{
"gr1": {file1.Name(), file2.Name()},
"gr2": {"file://" + file3.Name()},
"gr1": {file1.Path, file2.Path},
"gr2": {"file://" + file3.Path},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
Expect(sut.groupCaches["gr1"].ElementCount()).Should(Equal(3))
@ -267,7 +272,8 @@ var _ = Describe("ListCache", func() {
"gr1": {file1, file2, file3},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(),
defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
Expect(sut.groupCaches["gr1"].ElementCount()).Should(Equal(38000))
@ -279,7 +285,8 @@ var _ = Describe("ListCache", func() {
"gr1": {"inlinedomain1.com\n#some comment\ninlinedomain2.com"},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(),
defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
Expect(sut.groupCaches["gr1"].ElementCount()).Should(Equal(2))
@ -299,7 +306,8 @@ var _ = Describe("ListCache", func() {
"gr1": {"inlinedomain1.com\n" + strings.Repeat("longString", 100000)},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(),
defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
found, group := sut.Match("inlinedomain1.com", []string{"gr1"})
@ -313,7 +321,8 @@ var _ = Describe("ListCache", func() {
"gr1": {"/^apple\\.(de|com)$/\n"},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, 0, NewDownloader(),
defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
found, group := sut.Match("apple.com", []string{"gr1"})
@ -334,7 +343,8 @@ var _ = Describe("ListCache", func() {
"gr2": {"inline\ndefinition\n"},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, time.Hour, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, time.Hour, NewDownloader(),
defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
c := sut.Configuration()
@ -345,10 +355,11 @@ var _ = Describe("ListCache", func() {
When("refresh is disabled", func() {
It("should print 'refresh disabled'", func() {
lists := map[string][]string{
"gr1": {emptyFile.Name()},
"gr1": {emptyFile.Path},
}
sut, err := NewListCache(ListCacheTypeBlacklist, lists, -1, NewDownloader(), defaultProcessingConcurrency)
sut, err := NewListCache(ListCacheTypeBlacklist, lists, -1, NewDownloader(),
defaultProcessingConcurrency, false)
Expect(err).Should(Succeed())
c := sut.Configuration()
@ -356,6 +367,20 @@ var _ = Describe("ListCache", func() {
})
})
})
// With async load (the 'fast' strategy), NewListCache must not surface
// download/open errors at construction time — lists are expected to be
// loaded after startup instead.
Describe("StartStrategy", func() {
	When("async load is enabled", func() {
		It("should never return an error", func() {
			lists := map[string][]string{
				// deliberately unreadable source: must still construct cleanly
				"gr1": {"doesnotexist"},
			}
			_, err := NewListCache(ListCacheTypeBlacklist, lists, -1, NewDownloader(),
				defaultProcessingConcurrency, true)
			Expect(err).Should(Succeed())
		})
	})
})
})
type MockDownloader struct {

View File

@ -9,61 +9,102 @@ import (
"github.com/miekg/dns"
"github.com/sirupsen/logrus"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
. "github.com/onsi/gomega"
. "github.com/onsi/ginkgo/v2"
)
var err error
var _ = Describe("DatabaseWriter", func() {
Describe("Database query log", func() {
When("New log entry was created", func() {
It("should be persisted in the database", func() {
sqlite := sqlite.Open("file::memory:")
writer, err := newDatabaseWriter(sqlite, 7, time.Millisecond)
Expect(err).Should(Succeed())
request := &model.Request{
Req: util.NewMsgWithQuestion("google.de.", dns.Type(dns.TypeA)),
Log: logrus.NewEntry(log.Log()),
}
res, err := util.NewMsgWithAnswer("example.com", 123, dns.Type(dns.TypeA), "123.124.122.122")
Describe("Database query log to sqlite", func() {
var (
sqliteDB gorm.Dialector
writer *DatabaseWriter
request *model.Request
)
BeforeEach(func() {
sqliteDB = sqlite.Open("file::memory:")
request = &model.Request{
Req: util.NewMsgWithQuestion("google.de.", dns.Type(dns.TypeA)),
Log: logrus.NewEntry(log.Log()),
}
})
When("New log entry was created", func() {
BeforeEach(func() {
writer, err = newDatabaseWriter(sqliteDB, 7, time.Millisecond)
Expect(err).Should(Succeed())
})
It("should be persisted in the database", func() {
res, err := util.NewMsgWithAnswer("example.com", 123, dns.Type(dns.TypeA), "123.124.122.122")
Expect(err).Should(Succeed())
response := &model.Response{
Res: res,
Reason: "Resolved",
RType: model.ResponseTypeRESOLVED,
}
// one entry with now as timestamp
writer.Write(&LogEntry{
Request: request,
Response: response,
Start: time.Now(),
DurationMs: 20,
})
// one entry before 2 days
writer.Write(&LogEntry{
Request: request,
Response: response,
Start: time.Now().AddDate(0, 0, -2),
DurationMs: 20,
})
// force write
writer.doDBWrite()
// 2 entries in the database
Eventually(func() int64 {
var res int64
result := writer.db.Find(&logEntry{})
result.Count(&res)
return res
}, "5s").Should(BeNumerically("==", 2))
// do cleanup now
writer.CleanUp()
// now only 1 entry in the database
Eventually(func() (res int64) {
result := writer.db.Find(&logEntry{})
result.Count(&res)
return res
}, "1s").Should(BeNumerically("==", 1))
}, "5s").Should(BeNumerically("==", 2))
})
})
When("There are log entries with timestamp exceeding the retention period", func() {
BeforeEach(func() {
writer, err = newDatabaseWriter(sqliteDB, 1, time.Millisecond)
Expect(err).Should(Succeed())
})
It("these old entries should be deleted", func() {
sqlite := sqlite.Open("file::memory:")
writer, err := newDatabaseWriter(sqlite, 1, time.Millisecond)
Expect(err).Should(Succeed())
request := &model.Request{
Req: util.NewMsgWithQuestion("google.de.", dns.Type(dns.TypeA)),
Log: logrus.NewEntry(log.Log()),
}
res, err := util.NewMsgWithAnswer("example.com", 123, dns.Type(dns.TypeA), "123.124.122.122")
Expect(err).Should(Succeed())
response := &model.Response{
Res: res,
Reason: "Resolved",
@ -86,6 +127,9 @@ var _ = Describe("DatabaseWriter", func() {
DurationMs: 20,
})
// force write
writer.doDBWrite()
// 2 entries in the database
Eventually(func() int64 {
var res int64
@ -94,7 +138,7 @@ var _ = Describe("DatabaseWriter", func() {
result.Count(&res)
return res
}, "1s").Should(BeNumerically("==", 2))
}, "5s").Should(BeNumerically("==", 2))
// do cleanup now
writer.CleanUp()
@ -106,10 +150,12 @@ var _ = Describe("DatabaseWriter", func() {
result.Count(&res)
return res
}, "1s").Should(BeNumerically("==", 1))
}, "5s").Should(BeNumerically("==", 1))
})
})
})
Describe("Database query log fails", func() {
When("mysql connection parameters wrong", func() {
It("should be log with fatal", func() {
_, err := NewDatabaseWriter("mysql", "wrong param", 7, 1)

View File

@ -101,13 +101,15 @@ func NewBlockingResolver(cfg config.BlockingConfig,
refreshPeriod := time.Duration(cfg.RefreshPeriod)
downloader := createDownloader(cfg, bootstrap)
blacklistMatcher, blErr := lists.NewListCache(lists.ListCacheTypeBlacklist, cfg.BlackLists,
refreshPeriod, downloader, cfg.ProcessingConcurrency)
refreshPeriod, downloader, cfg.ProcessingConcurrency,
(cfg.StartStrategy == config.StartStrategyTypeFast))
whitelistMatcher, wlErr := lists.NewListCache(lists.ListCacheTypeWhitelist, cfg.WhiteLists,
refreshPeriod, downloader, cfg.ProcessingConcurrency)
refreshPeriod, downloader, cfg.ProcessingConcurrency,
(cfg.StartStrategy == config.StartStrategyTypeFast))
whitelistOnlyGroups := determineWhitelistOnlyGroups(&cfg)
err = multierror.Append(err, blErr, wlErr).ErrorOrNil()
if err != nil && cfg.FailStartOnListError {
if err != nil && cfg.StartStrategy == config.StartStrategyTypeFailOnError {
return nil, err
}

View File

@ -859,14 +859,13 @@ var _ = Describe("BlockingResolver", Label("blockingResolver"), func() {
MatchError("unknown blockType 'wrong', please use one of: ZeroIP, NxDomain or specify destination IP address(es)"))
})
})
When("failStartOnListError is active", func() {
When("startStrategy is failOnError", func() {
It("should fail if lists can't be downloaded", func() {
_, err := NewBlockingResolver(config.BlockingConfig{
BlackLists: map[string][]string{"gr1": {"wrongPath"}},
WhiteLists: map[string][]string{"whitelist": {"wrongPath"}},
FailStartOnListError: true,
BlockType: "zeroIp",
BlackLists: map[string][]string{"gr1": {"wrongPath"}},
WhiteLists: map[string][]string{"whitelist": {"wrongPath"}},
StartStrategy: config.StartStrategyTypeFailOnError,
BlockType: "zeroIp",
}, nil, skipUpstreamCheck)
Expect(err).Should(HaveOccurred())
})