Remove empty (invalid) entries from the cache

This commit is contained in:
Deluan 2022-12-24 18:53:09 -05:00 committed by Deluan Quintão
parent e89d99aee0
commit 52a4721c91
3 changed files with 203 additions and 1 deletions

View File

@ -210,7 +210,7 @@ func newFSCache(name, cacheSize, cacheFolder string, maxItems int) (fscache.Cach
return nil, nil
}
lru := fscache.NewLRUHaunter(maxItems, int64(size), consts.DefaultCacheCleanUpInterval)
lru := NewFileHaunter(maxItems, int64(size), consts.DefaultCacheCleanUpInterval)
h := fscache.NewLRUHaunterStrategy(lru)
cacheFolder = filepath.Join(conf.Server.DataFolder, cacheFolder)

121
utils/cache/file_haunter.go vendored Normal file
View File

@ -0,0 +1,121 @@
package cache
import (
"sort"
"time"
"github.com/djherbis/fscache"
"github.com/navidrome/navidrome/log"
)
// haunterKV bundles a cache key with its entry and the stat result for the
// backing file, so Scrub can sort entries by access time and sum sizes
// without re-statting each file.
type haunterKV struct {
	key   string
	value fscache.Entry
	info  fscache.FileInfo
}
// NewFileHaunter builds an fscache.LRUHaunter that wakes up every "period",
// removes empty (invalid) cache files, and evicts the least recently used
// entries whenever the cache holds more than maxItems entries or more than
// maxSize total bytes. Passing 0 for maxItems or maxSize disables that limit.
//
// Based on fscache.NewLRUHaunter
func NewFileHaunter(maxItems int, maxSize int64, period time.Duration) fscache.LRUHaunter {
	h := fileHaunter{
		maxItems: maxItems,
		maxSize:  maxSize,
		period:   period,
	}
	return &h
}
// fileHaunter implements fscache.LRUHaunter. It scrubs the cache every
// period, enforcing the maxItems / maxSize limits (0 disables a limit) and
// deleting zero-byte files.
type fileHaunter struct {
	period   time.Duration
	maxItems int
	maxSize  int64
}
// Next reports how long the cache should wait before the next Scrub run
// (the configured period).
func (j *fileHaunter) Next() time.Duration {
	return j.period
}
// Scrub walks all cache entries and returns the keys to remove. It always
// reaps empty (zero-byte) files, which are considered invalid, and then
// evicts the least-recently-accessed entries until the cache is within
// maxItems and maxSize (when those limits are > 0). Entries currently in
// use, or that cannot be stat'ed, are skipped.
func (j *fileHaunter) Scrub(c fscache.CacheAccessor) (keysToReap []string) {
	var count int
	var size int64
	var okFiles []haunterKV
	c.EnumerateEntries(func(key string, e fscache.Entry) bool {
		if e.InUse() {
			return true
		}
		fileInfo, err := c.Stat(e.Name())
		if err != nil {
			return true
		}
		if fileInfo.Size() == 0 {
			log.Trace("Removing invalid empty file", "file", e.Name())
			keysToReap = append(keysToReap, key)
			// Skip accounting for this entry: it is already scheduled for
			// removal, and adding it to okFiles could reap its key twice
			// (and inflate count) when the LRU loops below run.
			return true
		}
		count++
		size = size + fileInfo.Size()
		okFiles = append(okFiles, haunterKV{
			key:   key,
			value: e,
			info:  fileInfo,
		})
		return true
	})
	// Oldest (least recently accessed) entries first, so they are evicted first.
	sort.Slice(okFiles, func(i, j int) bool {
		iLastRead := okFiles[i].info.AccessTime()
		jLastRead := okFiles[j].info.AccessTime()
		return iLastRead.Before(jLastRead)
	})
	// Pops the LRU entry and records its key; returns false when no more
	// entries can be removed, which stops the eviction loops below.
	collectKeysToReapFn := func() bool {
		var key *string
		var err error
		key, count, size, err = j.removeFirst(&okFiles, count, size)
		if err != nil {
			return false
		}
		if key != nil {
			keysToReap = append(keysToReap, *key)
		}
		return true
	}
	if j.maxItems > 0 {
		for count > j.maxItems {
			if !collectKeysToReapFn() {
				break
			}
		}
	}
	if j.maxSize > 0 {
		for size > j.maxSize {
			if !collectKeysToReapFn() {
				break
			}
		}
	}
	return keysToReap
}
// removeFirst pops the oldest (first) entry from items and returns its key
// together with the updated item count and total size. It returns a non-nil
// error when items is empty; previously the error was always nil and an
// empty slice caused an index-out-of-range panic, leaving the caller's
// error-handling path unreachable.
func (j *fileHaunter) removeFirst(items *[]haunterKV, count int, size int64) (*string, int, int64, error) {
	if len(*items) == 0 {
		return nil, count, size, errors.New("cache haunter: no more entries to remove")
	}
	var f haunterKV
	f, *items = (*items)[0], (*items)[1:]
	count--
	size = size - f.info.Size()
	return &f.key, count, size, nil
}

81
utils/cache/file_hauter_test.go vendored Normal file
View File

@ -0,0 +1,81 @@
package cache_test
import (
"fmt"
"io"
"os"
"path/filepath"
"testing"
"time"
"github.com/djherbis/fscache"
"github.com/navidrome/navidrome/utils/cache"
)
// TestFileHaunterMaxSize verifies that the haunter evicts the
// least-recently-used entries once the cache exceeds maxSize, and that it
// removes empty (invalid) entries regardless of their age.
func TestFileHaunterMaxSize(t *testing.T) {
	// t.TempDir cleans up automatically, replacing the unchecked
	// os.MkdirTemp + deferred os.RemoveAll.
	tempDir := t.TempDir()
	cacheDir := filepath.Join(tempDir, "cache1")
	fs, err := fscache.NewFs(cacheDir, 0700)
	if err != nil {
		t.Fatalf("creating cache fs: %s", err)
	}
	// maxItems=0 disables the item limit; maxSize=24 bytes forces eviction
	// after five 5-byte files (25 bytes); scrub runs every 400ms.
	c, err := fscache.NewCacheWithHaunter(fs, fscache.NewLRUHaunterStrategy(cache.NewFileHaunter(0, 24, 400*time.Millisecond)))
	if err != nil {
		t.Fatalf("creating cache: %s", err)
	}
	defer c.Clean() //nolint:errcheck
	// Create 5 normal files and 1 empty
	for i := 0; i < 6; i++ {
		name := fmt.Sprintf("stream-%v", i)
		var r fscache.ReadAtCloser
		if i < 5 {
			r = createCachedStream(c, name, "hello")
		} else { // Last one is empty
			r = createCachedStream(c, name, "")
		}
		if !c.Exists(name) {
			t.Errorf(name + " should exist")
		}
		// Small delay so each entry gets a distinct access time for LRU ordering.
		<-time.After(10 * time.Millisecond)
		if err := r.Close(); err != nil {
			t.Error(err)
		}
	}
	// Wait long enough for at least one scrub cycle to run.
	<-time.After(400 * time.Millisecond)
	if c.Exists("stream-0") {
		t.Errorf("stream-0 should have been scrubbed")
	}
	if c.Exists("stream-5") {
		t.Errorf("stream-5 should have been scrubbed")
	}
	files, err := os.ReadDir(cacheDir)
	if err != nil {
		t.Fatalf("reading cache dir: %s", err)
	}
	if len(files) != 4 {
		t.Errorf("expected 4 items in directory, got %d", len(files))
	}
}
// createCachedStream stores contents in the cache under name and returns a
// reader for the cached entry. Errors are deliberately ignored: this is a
// test helper, and failures surface through the test's own assertions.
func createCachedStream(c *fscache.FSCache, name string, contents string) fscache.ReadAtCloser {
	reader, writer, _ := c.Get(name)
	_, _ = writer.Write([]byte(contents))
	_ = writer.Close()
	// Drain the reader so the entry is fully written to disk before returning.
	_, _ = io.Copy(io.Discard, reader)
	return reader
}