Use sync/atomic package, now that we are at Go 1.19

Deluan 2023-02-15 21:21:59 -05:00
parent 6dce4b2478
commit a134b1b608
3 changed files with 16 additions and 15 deletions
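For reference, Go 1.19 added typed atomic values (atomic.Bool, atomic.Int32, atomic.Int64, and friends) to sync/atomic, which is what this commit switches to. A minimal sketch of the before/after, with illustrative names not taken from the codebase:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Go 1.19 typed atomics: the zero value is ready to use and
	// methods replace the pointer-based helper functions.
	var closed atomic.Bool
	closed.Store(true)
	fmt.Println(closed.Load()) // true

	var count atomic.Int32
	count.Add(1)
	fmt.Println(count.Load()) // 1

	// Pre-1.19 style for the same counter, as used before this commit:
	var old int32
	atomic.AddInt32(&old, 1)
	fmt.Println(atomic.LoadInt32(&old)) // 1
}
```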

View File

@@ -5,8 +5,7 @@ import (
 	"io"
 	"strings"
 	"sync"
+	"sync/atomic"
-	"github.com/navidrome/navidrome/utils"
 )
 func NewMockFFmpeg(data string) *MockFFmpeg {
@@ -16,7 +15,7 @@ func NewMockFFmpeg(data string) *MockFFmpeg {
 type MockFFmpeg struct {
 	io.Reader
 	lock   sync.Mutex
-	closed utils.AtomicBool
+	closed atomic.Bool
 	Error  error
 }
@@ -54,10 +53,10 @@ func (ff *MockFFmpeg) Read(p []byte) (n int, err error) {
 }
 func (ff *MockFFmpeg) Close() error {
-	ff.closed.Set(true)
+	ff.closed.Store(true)
 	return nil
 }
 func (ff *MockFFmpeg) IsClosed() bool {
-	return ff.closed.Get()
+	return ff.closed.Load()
 }
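The mock's Set(true)/Get() calls map one-to-one onto atomic.Bool's Store(true)/Load(). Like the other sync/atomic types, atomic.Bool must not be copied after first use, so structs embedding it (as MockFFmpeg does) should keep being handled by pointer, which the *MockFFmpeg receivers above already do. A minimal sketch of the flag pattern in isolation, using a hypothetical type name:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// closer is a hypothetical stand-in for the closed-flag handling above;
// the real MockFFmpeg also carries an io.Reader, a mutex, and an Error field.
type closer struct {
	closed atomic.Bool
}

func (c *closer) Close() error {
	c.closed.Store(true) // replaces the old Set(true)
	return nil
}

func (c *closer) IsClosed() bool {
	return c.closed.Load() // replaces the old Get()
}

func main() {
	c := &closer{}
	fmt.Println(c.IsClosed()) // false: zero value of atomic.Bool
	_ = c.Close()
	fmt.Println(c.IsClosed()) // true
}
```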

View File

@@ -17,7 +17,7 @@ import (
 // Call NewFileCache and wait for it to be ready
 func callNewFileCache(name, cacheSize, cacheFolder string, maxItems int, getReader ReadFunc) *fileCache {
 	fc := NewFileCache(name, cacheSize, cacheFolder, maxItems, getReader).(*fileCache)
-	Eventually(func() bool { return fc.ready.Get() }).Should(BeTrue())
+	Eventually(func() bool { return fc.ready.Load() }).Should(BeTrue())
 	return fc
 }
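fileCache's ready flag gets the same treatment; the test helper simply polls it. A self-contained sketch of that polling pattern with Gomega, using a hypothetical struct in place of fileCache:

```go
package cache_test

import (
	"sync/atomic"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// warmable is a hypothetical stand-in for fileCache's ready flag.
type warmable struct {
	ready atomic.Bool
}

func TestWaitUntilReady(t *testing.T) {
	g := NewWithT(t)
	w := &warmable{}
	go func() {
		time.Sleep(10 * time.Millisecond) // simulate asynchronous warm-up
		w.ready.Store(true)
	}()
	// Poll the atomic flag until it flips, mirroring callNewFileCache above.
	g.Eventually(func() bool { return w.ready.Load() }).Should(BeTrue())
}
```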

View File

@@ -53,13 +53,15 @@ var _ = Describe("Pipeline", func() {
 			}
 			close(inC)
-			var current, count, max int32
+			current := atomic.Int32{}
+			count := atomic.Int32{}
+			max := atomic.Int32{}
 			outC, _ := pl.Stage(context.Background(), maxWorkers, inC, func(ctx context.Context, in int) (int, error) {
-				defer atomic.AddInt32(&current, -1)
-				c := atomic.AddInt32(&current, 1)
-				atomic.AddInt32(&count, 1)
-				if c > atomic.LoadInt32(&max) {
-					atomic.StoreInt32(&max, c)
+				defer current.Add(-1)
+				c := current.Add(1)
+				count.Add(1)
+				if c > max.Load() {
+					max.Store(c)
 				}
 				time.Sleep(10 * time.Millisecond) // Slow process
 				return 0, nil
@@ -68,9 +70,9 @@ var _ = Describe("Pipeline", func() {
 			for range outC {
 			}
-			Expect(count).To(Equal(int32(numJobs)))
-			Expect(current).To(Equal(int32(0)))
-			Expect(max).To(Equal(int32(maxWorkers)))
+			Expect(count.Load()).To(Equal(int32(numJobs)))
+			Expect(current.Load()).To(Equal(int32(0)))
+			Expect(max.Load()).To(Equal(int32(maxWorkers)))
 		})
 	})
 	When("the context is canceled", func() {