Use an RWMutex instead of an AtomicBool, to reduce contention

Deluan 2020-07-26 00:45:33 -04:00
parent f8f16d676d
commit 67da83c84d
4 changed files with 39 additions and 28 deletions
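
The change below swaps the two atomic flags for plain bools guarded by a single sync.RWMutex: the background initialization goroutine takes the write lock while it sets the flags, and every reader takes the shared read lock. A minimal, self-contained sketch of that locking pattern follows; the cacheState type, its fields, and the init/available helpers are simplified stand-ins for illustration, not the actual Navidrome code.

package main

import (
    "fmt"
    "sync"
    "time"
)

// cacheState mimics the shape of the change: two plain bools protected by
// one RWMutex, instead of two separate atomic booleans.
type cacheState struct {
    mu       sync.RWMutex
    ready    bool
    disabled bool
}

// init plays the role of the background setup goroutine: it takes the write
// lock once, flips both flags consistently, then releases the lock.
func (c *cacheState) init(ok bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.disabled = !ok
    c.ready = true
}

// available mirrors the read path: the shared (read) lock lets many callers
// check both flags concurrently without blocking each other.
func (c *cacheState) available() bool {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return c.ready && !c.disabled
}

func main() {
    c := &cacheState{}
    go c.init(true)                   // setup happens asynchronously, as in the diff
    time.Sleep(10 * time.Millisecond) // crude wait, just for the demo
    fmt.Println("available:", c.available())
}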

View File

@@ -5,12 +5,12 @@ import (
     "fmt"
     "io"
     "path/filepath"
+    "sync"
     "time"
 
     "github.com/deluan/navidrome/conf"
     "github.com/deluan/navidrome/consts"
     "github.com/deluan/navidrome/log"
-    "github.com/deluan/navidrome/utils"
     "github.com/djherbis/fscache"
     "github.com/dustin/go-humanize"
 )
@@ -29,17 +29,21 @@ func NewFileCache(name, cacheSize, cacheFolder string, maxItems int, getReader R
         cacheFolder: cacheFolder,
         maxItems:    maxItems,
         getReader:   getReader,
-        disabled:    utils.AtomicBool{},
-        ready:       utils.AtomicBool{},
+        mutex:       &sync.RWMutex{},
     }
 
     go func() {
         cache, err := newFSCache(fc.name, fc.cacheSize, fc.cacheFolder, fc.maxItems)
+        fc.mutex.Lock()
+        defer fc.mutex.Unlock()
         if err == nil {
             fc.cache = cache
-            fc.disabled.Set(cache == nil)
+            fc.disabled = cache == nil
+        }
+        fc.ready = true
+        if fc.disabled {
+            log.Debug("Cache disabled", "cache", fc.name, "size", fc.cacheSize)
         }
-        fc.ready.Set(true)
     }()
 
     return fc
@@ -52,18 +56,30 @@ type fileCache struct {
     maxItems    int
     cache       fscache.Cache
     getReader   ReadFunc
-    disabled    utils.AtomicBool
-    ready       utils.AtomicBool
+    disabled    bool
+    ready       bool
+    mutex       *sync.RWMutex
 }
 
+func (fc *fileCache) Ready() bool {
+    fc.mutex.RLock()
+    defer fc.mutex.RUnlock()
+    return fc.ready
+}
+
+func (fc *fileCache) available(ctx context.Context) bool {
+    fc.mutex.RLock()
+    defer fc.mutex.RUnlock()
+
+    if !fc.ready {
+        log.Debug(ctx, "Cache not initialized yet", "cache", fc.name)
+    }
+
+    return fc.ready && !fc.disabled
+}
+
 func (fc *fileCache) Get(ctx context.Context, arg fmt.Stringer) (*CachedStream, error) {
-    if !fc.Ready() {
-        log.Debug(ctx, "Cache not initialized yet", "cache", fc.name)
-    }
-    if fc.disabled.Get() {
-        log.Debug(ctx, "Cache disabled", "cache", fc.name)
-    }
-    if fc.disabled.Get() || !fc.Ready() {
+    if !fc.available(ctx) {
         reader, err := fc.getReader(ctx, arg)
         if err != nil {
             return nil, err
@@ -108,10 +124,6 @@ func (fc *fileCache) Get(ctx context.Context, arg fmt.Stringer) (*CachedStream,
     return &CachedStream{Reader: r, Cached: cached}, nil
 }
 
-func (fc *fileCache) Ready() bool {
-    return fc.ready.Get()
-}
-
 type CachedStream struct {
     io.Reader
     io.Seeker

View File

@@ -40,13 +40,13 @@ var _ = Describe("File Caches", func() {
         It("creates the cache folder with invalid size", func() {
             fc := callNewFileCache("test", "abc", "test", 0, nil)
             Expect(fc.cache).ToNot(BeNil())
-            Expect(fc.disabled.Get()).To(BeFalse())
+            Expect(fc.disabled).To(BeFalse())
         })
 
         It("returns empty if cache size is '0'", func() {
             fc := callNewFileCache("test", "0", "test", 0, nil)
             Expect(fc.cache).To(BeNil())
-            Expect(fc.disabled.Get()).To(BeTrue())
+            Expect(fc.disabled).To(BeTrue())
         })
     })

View File

@@ -70,8 +70,7 @@ func (ms *mediaStreamer) NewStream(ctx context.Context, id string, reqFormat str
         if err != nil {
             return nil, err
         }
-        s.Reader = f
-        s.Closer = f
+        s.ReadCloser = f
         s.Seeker = f
         s.format = mf.Suffix
         return s, nil
@@ -93,10 +92,9 @@ func (ms *mediaStreamer) NewStream(ctx context.Context, id string, reqFormat str
     log.Debug(ctx, "Streaming TRANSCODED file", "id", mf.ID, "path", mf.Path,
         "requestBitrate", reqBitRate, "requestFormat", reqFormat,
         "originalBitrate", mf.BitRate, "originalFormat", mf.Suffix,
-        "selectedBitrate", bitRate, "selectedFormat", format, "cached", cached)
+        "selectedBitrate", bitRate, "selectedFormat", format, "cached", cached, "seekable", s.Seekable())
 
-    s.Reader = r
-    s.Closer = r
+    s.ReadCloser = r
     if r.Seekable() {
         s.Seeker = r
     }
@@ -109,8 +107,7 @@ type Stream struct {
     mf      *model.MediaFile
     bitRate int
     format  string
-    io.Reader
-    io.Closer
+    io.ReadCloser
     io.Seeker
 }
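
One small point in the hunks above: embedding io.ReadCloser replaces the separate io.Reader and io.Closer embeds, which works because the standard library defines io.ReadCloser as exactly that pair of interfaces, so a single assignment now wires up both methods. A rough stand-alone illustration; the stream type and the strings/NopCloser plumbing are generic examples, not Navidrome code:

package main

import (
    "fmt"
    "io"
    "strings"
)

// stream embeds io.ReadCloser (Reader + Closer in one interface) plus an
// optional Seeker, mirroring the simplified Stream struct in the diff.
type stream struct {
    io.ReadCloser
    io.Seeker // left nil when the underlying source cannot seek
}

func main() {
    // io.NopCloser adapts a plain Reader into an io.ReadCloser for the demo.
    s := &stream{ReadCloser: io.NopCloser(strings.NewReader("hello"))}
    buf := make([]byte, 5)
    n, _ := s.Read(buf) // served by the embedded ReadCloser
    fmt.Println(string(buf[:n]))
    _ = s.Close() // also served by the embedded ReadCloser
}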

View File

@@ -32,8 +32,10 @@ func (c *StreamController) Stream(w http.ResponseWriter, r *http.Request) (*resp
     if err != nil {
         return nil, err
     }
+
+    // Make sure the stream will be closed at the end, to avoid leakage
     defer func() {
-        if err := stream.Close(); err != nil {
+        if err := stream.Close(); err != nil && log.CurrentLevel() >= log.LevelDebug {
             log.Error("Error closing stream", "id", id, "file", stream.Name(), err)
         }
     }()
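
The final hunk is the usual defer-and-close pattern with one twist: the error from Close is only logged when the logger is at debug verbosity or above (the commit does not say why; keeping routine close errors out of normal logs while still surfacing them during debugging is one plausible reading). Below is a generic sketch of the same pattern using only the standard library; openStream and the DEBUG environment check are hypothetical stand-ins, not the project's API:

package main

import (
    "io"
    "log"
    "os"
)

// debugEnabled stands in for a log-level check such as the
// log.CurrentLevel() >= log.LevelDebug guard seen in the diff.
var debugEnabled = os.Getenv("DEBUG") != ""

// openStream is a hypothetical helper that returns something needing Close.
func openStream(path string) (io.ReadCloser, error) {
    return os.Open(path)
}

func serve(path string) error {
    stream, err := openStream(path)
    if err != nil {
        return err
    }
    // Make sure the stream is closed on every return path, so the underlying
    // handle is not leaked; only report close errors when debugging.
    defer func() {
        if cerr := stream.Close(); cerr != nil && debugEnabled {
            log.Printf("error closing stream %q: %v", path, cerr)
        }
    }()

    _, err = io.Copy(io.Discard, stream) // placeholder for writing the response
    return err
}

func main() {
    if err := serve(os.Args[0]); err != nil {
        log.Println(err)
    }
}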