diff --git a/internal/operations/account.go b/internal/operations/account.go
index 6d03ed04..078ee9f2 100644
--- a/internal/operations/account.go
+++ b/internal/operations/account.go
@@ -2,24 +2,25 @@ package operations
 
 import (
 	"context"
+	"sort"
+	"strings"
+	"time"
+
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/store"
+	"github.com/alist-org/alist/v3/pkg/generic_sync"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/pkg/errors"
-	"sort"
-	"strings"
-	"sync"
-	"time"
 )
 
 // Although the driver type is stored,
 // there is an account in each driver,
 // so it should actually be an account, just wrapped by the driver
-var accountsMap = map[string]driver.Driver{}
+var accountsMap generic_sync.MapOf[string, driver.Driver]
 
 func GetAccountByVirtualPath(virtualPath string) (driver.Driver, error) {
-	accountDriver, ok := accountsMap[virtualPath]
+	accountDriver, ok := accountsMap.Load(virtualPath)
 	if !ok {
 		return nil, errors.Errorf("no virtual path for an account is: %s", virtualPath)
 	}
@@ -45,7 +46,7 @@ func CreateAccount(ctx context.Context, account model.Account) error {
 	if err != nil {
 		return errors.WithMessage(err, "failed init account")
 	}
-	accountsMap[account.VirtualPath] = accountDriver
+	accountsMap.Store(account.VirtualPath, accountDriver)
 	return nil
 }
 
@@ -63,8 +64,8 @@ func UpdateAccount(ctx context.Context, account model.Account) error {
 		return errors.WithMessage(err, "failed update account in database")
 	}
 	if oldAccount.VirtualPath != account.VirtualPath {
-		// virtual path renamed
-		delete(accountsMap, oldAccount.VirtualPath)
+		// virtual path renamed, drop the cached driver under the old path
+		accountsMap.Delete(oldAccount.VirtualPath)
 	}
 	accountDriver, err := GetAccountByVirtualPath(oldAccount.VirtualPath)
 	if err != nil {
@@ -78,7 +79,7 @@ func UpdateAccount(ctx context.Context, account model.Account) error {
 	if err != nil {
 		return errors.WithMessage(err, "failed init account")
 	}
-	accountsMap[account.VirtualPath] = accountDriver
+	accountsMap.Store(account.VirtualPath, accountDriver)
 	return nil
 }
 
@@ -104,26 +105,27 @@ func SaveDriverAccount(driver driver.Driver) error {
 func GetAccountsByPath(path string) []driver.Driver {
 	accounts := make([]driver.Driver, 0)
 	curSlashCount := 0
-	for _, v := range accountsMap {
-		virtualPath := utils.GetActualVirtualPath(v.GetAccount().VirtualPath)
+	accountsMap.Range(func(key string, value driver.Driver) bool {
+		virtualPath := utils.GetActualVirtualPath(value.GetAccount().VirtualPath)
 		if virtualPath == "/" {
 			virtualPath = ""
 		}
 		// not this
 		if path != virtualPath && !strings.HasPrefix(path, virtualPath+"/") {
-			continue
+			return true
 		}
 		slashCount := strings.Count(virtualPath, "/")
 		// not the longest match
 		if slashCount < curSlashCount {
-			continue
+			return true
 		}
 		if slashCount > curSlashCount {
 			accounts = accounts[:0]
 			curSlashCount = slashCount
 		}
-		accounts = append(accounts, v)
-	}
+		accounts = append(accounts, value)
+		return true
+	})
 	// make sure the order is the same for same input
 	sort.Slice(accounts, func(i, j int) bool {
 		return accounts[i].GetAccount().VirtualPath < accounts[j].GetAccount().VirtualPath
@@ -136,12 +138,7 @@
 // GetAccountVirtualFilesByPath(/a) => b,c,d
 func GetAccountVirtualFilesByPath(prefix string) []driver.FileInfo {
 	files := make([]driver.FileInfo, 0)
-	accounts := make([]driver.Driver, len(accountsMap))
-	i := 0
-	for _, v := range accountsMap {
-		accounts[i] = v
-		i += 1
-	}
+	accounts := accountsMap.Values()
 	sort.Slice(accounts, func(i, j int) bool {
 		if accounts[i].GetAccount().Index == accounts[j].GetAccount().Index {
 			return accounts[i].GetAccount().VirtualPath < accounts[j].GetAccount().VirtualPath
@@ -177,7 +174,7 @@
 	return files
 }
 
-var balanceMap sync.Map
+var balanceMap generic_sync.MapOf[string, int]
 
 // GetBalancedAccount get account by path
 func GetBalancedAccount(path string) driver.Driver {
@@ -194,7 +191,7 @@
 	cur, ok := balanceMap.Load(virtualPath)
 	i := 0
 	if ok {
-		i = cur.(int)
+		i = cur
 		i = (i + 1) % accountNum
 		balanceMap.Store(virtualPath, i)
 	} else {
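The account.go hunks above swap a plain map (with no locking of its own) and an untyped sync.Map for the typed generic_sync.MapOf, so lookups return driver.Driver and int directly rather than interface{}. A minimal sketch of the round-robin pattern behind the balanceMap hunk, not part of the patch; the pick helper and the "/mnt" key are illustrative only:

package main

import (
	"fmt"

	"github.com/alist-org/alist/v3/pkg/generic_sync"
)

// pick mirrors the balanceMap logic: cycle an index per key. With
// MapOf[string, int] the loaded counter is already an int, so the
// cur.(int) assertion needed with sync.Map disappears.
func pick(counters *generic_sync.MapOf[string, int], key string, n int) int {
	i := 0
	if cur, ok := counters.Load(key); ok {
		i = (cur + 1) % n
	}
	counters.Store(key, i)
	return i
}

func main() {
	var counters generic_sync.MapOf[string, int]
	for n := 0; n < 5; n++ {
		fmt.Print(pick(&counters, "/mnt", 3), " ") // 0 1 2 0 1
	}
	fmt.Println()
}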
diff --git a/pkg/generic_sync/map.go b/pkg/generic_sync/map.go
new file mode 100644
index 00000000..74f89d7e
--- /dev/null
+++ b/pkg/generic_sync/map.go
@@ -0,0 +1,383 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generic_sync
+
+import (
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+// MapOf is like a Go map[K]V but is safe for concurrent use
+// by multiple goroutines without additional locking or coordination.
+// Loads, stores, and deletes run in amortized constant time.
+//
+// The MapOf type is specialized. Most code should use a plain Go map instead,
+// with separate locking or coordination, to make it easier to maintain other
+// invariants along with the map content.
+//
+// The MapOf type is optimized for two common use cases: (1) when the entry for a given
+// key is only ever written once but read many times, as in caches that only grow,
+// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
+// sets of keys. In these two cases, use of a MapOf may significantly reduce lock
+// contention compared to a Go map paired with a separate Mutex or RWMutex.
+//
+// The zero MapOf is empty and ready for use. A MapOf must not be copied after first use.
+type MapOf[K comparable, V any] struct {
+	mu sync.Mutex
+
+	// read contains the portion of the map's contents that are safe for
+	// concurrent access (with or without mu held).
+	//
+	// The read field itself is always safe to load, but must only be stored with
+	// mu held.
+	//
+	// Entries stored in read may be updated concurrently without mu, but updating
+	// a previously-expunged entry requires that the entry be copied to the dirty
+	// map and unexpunged with mu held.
+	read atomic.Value // readOnly
+
+	// dirty contains the portion of the map's contents that require mu to be
+	// held. To ensure that the dirty map can be promoted to the read map quickly,
+	// it also includes all of the non-expunged entries in the read map.
+	//
+	// Expunged entries are not stored in the dirty map. An expunged entry in the
+	// clean map must be unexpunged and added to the dirty map before a new value
+	// can be stored to it.
+	//
+	// If the dirty map is nil, the next write to the map will initialize it by
+	// making a shallow copy of the clean map, omitting stale entries.
+	dirty map[K]*entry[V]
+
+	// misses counts the number of loads since the read map was last updated that
+	// needed to lock mu to determine whether the key was present.
+	//
+	// Once enough misses have occurred to cover the cost of copying the dirty
+	// map, the dirty map will be promoted to the read map (in the unamended
+	// state) and the next store to the map will make a new dirty copy.
+	misses int
+}
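Because MapOf embeds a sync.Mutex and an atomic.Value, the zero value is immediately usable but, as the comment above says, must not be copied after first use. A small usage sketch, not part of the patch:

package main

import (
	"fmt"

	"github.com/alist-org/alist/v3/pkg/generic_sync"
)

func fill(m *generic_sync.MapOf[string, int]) { // pass by pointer: MapOf must not be copied
	m.Store("a", 1)
	m.Store("b", 2)
}

func main() {
	var m generic_sync.MapOf[string, int] // zero value, no constructor needed
	fill(&m)
	if v, ok := m.Load("a"); ok {
		fmt.Println(v) // prints 1; v is a typed int, no interface{} assertion
	}
}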
+
+// readOnly is an immutable struct stored atomically in the MapOf.read field.
+type readOnly[K comparable, V any] struct {
+	m       map[K]*entry[V]
+	amended bool // true if the dirty map contains some key not in m.
+}
+
+// expunged is an arbitrary pointer that marks entries which have been deleted
+// from the dirty map.
+var expunged = unsafe.Pointer(new(interface{}))
+
+// An entry is a slot in the map corresponding to a particular key.
+type entry[V any] struct {
+	// p points to the value stored for the entry.
+	//
+	// If p == nil, the entry has been deleted and m.dirty == nil.
+	//
+	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
+	// is missing from m.dirty.
+	//
+	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
+	// != nil, in m.dirty[key].
+	//
+	// An entry can be deleted by atomic replacement with nil: when m.dirty is
+	// next created, it will atomically replace nil with expunged and leave
+	// m.dirty[key] unset.
+	//
+	// An entry's associated value can be updated by atomic replacement, provided
+	// p != expunged. If p == expunged, an entry's associated value can be updated
+	// only after first setting m.dirty[key] = e so that lookups using the dirty
+	// map find the entry.
+	p unsafe.Pointer // *V
+}
+
+func newEntry[V any](i V) *entry[V] {
+	return &entry[V]{p: unsafe.Pointer(&i)}
+}
+
+// Load returns the value stored in the map for a key, or the zero
+// value if no value is present.
+// The ok result indicates whether value was found in the map.
+func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	e, ok := read.m[key]
+	if !ok && read.amended {
+		m.mu.Lock()
+		// Avoid reporting a spurious miss if m.dirty got promoted while we were
+		// blocked on m.mu. (If further loads of the same key will not miss, it's
+		// not worth copying the dirty map for this key.)
+		read, _ = m.read.Load().(readOnly[K, V])
+		e, ok = read.m[key]
+		if !ok && read.amended {
+			e, ok = m.dirty[key]
+			// Regardless of whether the entry was present, record a miss: this key
+			// will take the slow path until the dirty map is promoted to the read
+			// map.
+			m.missLocked()
+		}
+		m.mu.Unlock()
+	}
+	if !ok {
+		return value, false
+	}
+	return e.load()
+}
+
+func (e *entry[V]) load() (value V, ok bool) {
+	p := atomic.LoadPointer(&e.p)
+	if p == nil || p == expunged {
+		return value, false
+	}
+	return *(*V)(p), true
+}
+
+// Store sets the value for a key.
+func (m *MapOf[K, V]) Store(key K, value V) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok && e.tryStore(&value) {
+		return
+	}
+
+	m.mu.Lock()
+	read, _ = m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		if e.unexpungeLocked() {
+			// The entry was previously expunged, which implies that there is a
+			// non-nil dirty map and this entry is not in it.
+			m.dirty[key] = e
+		}
+		e.storeLocked(&value)
+	} else if e, ok := m.dirty[key]; ok {
+		e.storeLocked(&value)
+	} else {
+		if !read.amended {
+			// We're adding the first new key to the dirty map.
+			// Make sure it is allocated and mark the read-only map as incomplete.
+			m.dirtyLocked()
+			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
+		}
+		m.dirty[key] = newEntry(value)
+	}
+	m.mu.Unlock()
+}
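Load is lock-free whenever the key is in the read map, and Store updates such keys in place with a CAS; only keys absent from the read map take m.mu. That is what makes the disjoint-keys workload (use case 2 in the type comment) cheap. A sketch under that assumption, each goroutine owning one key; the types and counts are illustrative:

package main

import (
	"fmt"
	"sync"

	"github.com/alist-org/alist/v3/pkg/generic_sync"
)

func main() {
	var m generic_sync.MapOf[int, int]
	for g := 0; g < 8; g++ {
		m.Store(g, 0) // new keys land in the dirty map under m.mu
	}
	m.Values() // Range promotes the dirty map, moving the keys into read

	var wg sync.WaitGroup
	for g := 0; g < 8; g++ {
		wg.Add(1)
		go func(g int) {
			defer wg.Done()
			// The keys now live in the read map, so each Store takes the
			// CAS fast path in tryStore and never touches m.mu.
			for i := 0; i < 1000; i++ {
				m.Store(g, i)
			}
		}(g)
	}
	wg.Wait()
	fmt.Println(len(m.Values())) // 8
}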
+
+// tryStore stores a value if the entry has not been expunged.
+//
+// If the entry is expunged, tryStore returns false and leaves the entry
+// unchanged.
+func (e *entry[V]) tryStore(i *V) bool {
+	for {
+		p := atomic.LoadPointer(&e.p)
+		if p == expunged {
+			return false
+		}
+		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
+			return true
+		}
+	}
+}
+
+// unexpungeLocked ensures that the entry is not marked as expunged.
+//
+// If the entry was previously expunged, it must be added to the dirty map
+// before m.mu is unlocked.
+func (e *entry[V]) unexpungeLocked() (wasExpunged bool) {
+	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
+}
+
+// storeLocked unconditionally stores a value to the entry.
+//
+// The entry must be known not to be expunged.
+func (e *entry[V]) storeLocked(i *V) {
+	atomic.StorePointer(&e.p, unsafe.Pointer(i))
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
+	// Avoid locking if it's a clean hit.
+	read, _ := m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		actual, loaded, ok := e.tryLoadOrStore(value)
+		if ok {
+			return actual, loaded
+		}
+	}
+
+	m.mu.Lock()
+	read, _ = m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		if e.unexpungeLocked() {
+			m.dirty[key] = e
+		}
+		actual, loaded, _ = e.tryLoadOrStore(value)
+	} else if e, ok := m.dirty[key]; ok {
+		actual, loaded, _ = e.tryLoadOrStore(value)
+		m.missLocked()
+	} else {
+		if !read.amended {
+			// We're adding the first new key to the dirty map.
+			// Make sure it is allocated and mark the read-only map as incomplete.
+			m.dirtyLocked()
+			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
+		}
+		m.dirty[key] = newEntry(value)
+		actual, loaded = value, false
+	}
+	m.mu.Unlock()
+
+	return actual, loaded
+}
+
+// tryLoadOrStore atomically loads or stores a value if the entry is not
+// expunged.
+//
+// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
+// returns with ok==false.
+func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
+	p := atomic.LoadPointer(&e.p)
+	if p == expunged {
+		return actual, false, false
+	}
+	if p != nil {
+		return *(*V)(p), true, true
+	}
+
+	// Copy the value after the first load to make this method more amenable
+	// to escape analysis: if we hit the "load" path or the entry is expunged, we
+	// shouldn't bother heap-allocating.
+	ic := i
+	for {
+		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
+			return i, false, true
+		}
+		p = atomic.LoadPointer(&e.p)
+		if p == expunged {
+			return actual, false, false
+		}
+		if p != nil {
+			return *(*V)(p), true, true
+		}
+	}
+}
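LoadOrStore is the primitive behind the grow-only cache in use case 1 of the type comment. Note that its value argument is evaluated before the call, so an expensive computation still runs even when the key is already present; a Load-first pattern avoids that on the hot path. A sketch, with compute() as a hypothetical stand-in not found in the patch:

package main

import (
	"fmt"
	"strings"

	"github.com/alist-org/alist/v3/pkg/generic_sync"
)

var cache generic_sync.MapOf[string, string]

func compute(p string) string { return strings.ToLower(p) } // hypothetical expensive work

func get(p string) string {
	if v, ok := cache.Load(p); ok { // fast path: skip recomputation entirely
		return v
	}
	v, _ := cache.LoadOrStore(p, compute(p)) // under a race, the first store wins
	return v
}

func main() {
	fmt.Println(get("AbC")) // abc
	fmt.Println(get("AbC")) // abc, served from the cache
}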
+
+// Delete deletes the value for a key.
+func (m *MapOf[K, V]) Delete(key K) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	e, ok := read.m[key]
+	if !ok && read.amended {
+		m.mu.Lock()
+		read, _ = m.read.Load().(readOnly[K, V])
+		e, ok = read.m[key]
+		if !ok && read.amended {
+			delete(m.dirty, key)
+		}
+		m.mu.Unlock()
+	}
+	if ok {
+		e.delete()
+	}
+}
+
+func (e *entry[V]) delete() (hadValue bool) {
+	for {
+		p := atomic.LoadPointer(&e.p)
+		if p == nil || p == expunged {
+			return false
+		}
+		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
+			return true
+		}
+	}
+}
+
+// Range calls f sequentially for each key and value present in the map.
+// If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot of the MapOf's
+// contents: no key will be visited more than once, but if the value for any key
+// is stored or deleted concurrently, Range may reflect any mapping for that key
+// from any point during the Range call.
+//
+// Range may be O(N) with the number of elements in the map even if f returns
+// false after a constant number of calls.
+func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
+	// We need to be able to iterate over all of the keys that were already
+	// present at the start of the call to Range.
+	// If read.amended is false, then read.m satisfies that property without
+	// requiring us to hold m.mu for a long time.
+	read, _ := m.read.Load().(readOnly[K, V])
+	if read.amended {
+		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
+		// (assuming the caller does not break out early), so a call to Range
+		// amortizes an entire copy of the map: we can promote the dirty copy
+		// immediately!
+		m.mu.Lock()
+		read, _ = m.read.Load().(readOnly[K, V])
+		if read.amended {
+			read = readOnly[K, V]{m: m.dirty}
+			m.read.Store(read)
+			m.dirty = nil
+			m.misses = 0
+		}
+		m.mu.Unlock()
+	}
+
+	for k, e := range read.m {
+		v, ok := e.load()
+		if !ok {
+			continue
+		}
+		if !f(k, v) {
+			break
+		}
+	}
+}
+
+// Values returns a slice of the values in the map.
+func (m *MapOf[K, V]) Values() []V {
+	var values []V
+	m.Range(func(key K, value V) bool {
+		values = append(values, value)
+		return true
+	})
+	return values
+}
+
+func (m *MapOf[K, V]) missLocked() {
+	m.misses++
+	if m.misses < len(m.dirty) {
+		return
+	}
+	m.read.Store(readOnly[K, V]{m: m.dirty})
+	m.dirty = nil
+	m.misses = 0
+}
+
+func (m *MapOf[K, V]) dirtyLocked() {
+	if m.dirty != nil {
+		return
+	}
+
+	read, _ := m.read.Load().(readOnly[K, V])
+	m.dirty = make(map[K]*entry[V], len(read.m))
+	for k, e := range read.m {
+		if !e.tryExpungeLocked() {
+			m.dirty[k] = e
+		}
+	}
+}
+
+func (e *entry[V]) tryExpungeLocked() (isExpunged bool) {
+	p := atomic.LoadPointer(&e.p)
+	for p == nil {
+		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
+			return true
+		}
+		p = atomic.LoadPointer(&e.p)
+	}
+	return p == expunged
+}
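Range promotes the dirty map before iterating, so a full walk doubles as the amortized copy the comments describe, and Values is simply a Range that appends. A short sketch of the callback contract, mirroring the continue/break translation the GetAccountsByPath hunk makes; the keys and values are illustrative:

package main

import (
	"fmt"

	"github.com/alist-org/alist/v3/pkg/generic_sync"
)

func main() {
	var m generic_sync.MapOf[string, int]
	m.Store("a", 1)
	m.Store("b", 2)
	m.Store("c", 3)

	// Returning false from the callback is the loop's `break`,
	// returning true is `continue`.
	found := false
	m.Range(func(k string, v int) bool {
		if v%2 == 0 {
			found = true
			return false // stop iterating
		}
		return true // keep going
	})
	fmt.Println(found) // true

	// Iteration is a Go map walk, so the order is unspecified; that is
	// why the account.go code sorts the collected slice after Range.
	fmt.Println(len(m.Values())) // 3
}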
diff --git a/pkg/generic_sync/map_test.go b/pkg/generic_sync/map_test.go
new file mode 100644
index 00000000..22d78319
--- /dev/null
+++ b/pkg/generic_sync/map_test.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generic_sync_test
+
+import (
+	"math/rand"
+	"runtime"
+	"sync"
+	"testing"
+
+	"github.com/alist-org/alist/v3/pkg/generic_sync"
+)
+
+func TestConcurrentRange(t *testing.T) {
+	const mapSize = 1 << 10
+
+	m := new(generic_sync.MapOf[int64, int64])
+	for n := int64(1); n <= mapSize; n++ {
+		m.Store(n, int64(n))
+	}
+
+	done := make(chan struct{})
+	var wg sync.WaitGroup
+	defer func() {
+		close(done)
+		wg.Wait()
+	}()
+	for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- {
+		r := rand.New(rand.NewSource(g))
+		wg.Add(1)
+		go func(g int64) {
+			defer wg.Done()
+			for i := int64(0); ; i++ {
+				select {
+				case <-done:
+					return
+				default:
+				}
+				for n := int64(1); n < mapSize; n++ {
+					if r.Int63n(mapSize) == 0 {
+						m.Store(n, n*i*g)
+					} else {
+						m.Load(n)
+					}
+				}
+			}
+		}(g)
+	}
+
+	iters := 1 << 10
+	if testing.Short() {
+		iters = 16
+	}
+	for n := iters; n > 0; n-- {
+		seen := make(map[int64]bool, mapSize)
+
+		m.Range(func(k, v int64) bool {
+			if v%k != 0 {
+				t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
+			}
+			if seen[k] {
+				t.Fatalf("Range visited key %v twice", k)
+			}
+			seen[k] = true
+			return true
+		})
+
+		if len(seen) != mapSize {
+			t.Fatalf("Range visited %v elements of %v-element MapOf", len(seen), mapSize)
+		}
+	}
+}
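The ported test only exercises concurrent Range. A complementary sketch, my addition rather than part of the patch, covers Delete and Values in the same external-test-package style; like the upstream sync.Map tests, both are worth running with go test -race:

package generic_sync_test

import (
	"testing"

	"github.com/alist-org/alist/v3/pkg/generic_sync"
)

func TestDeleteAndValues(t *testing.T) {
	var m generic_sync.MapOf[string, int]
	m.Store("a", 1)
	m.Store("b", 2)
	m.Delete("a")
	if _, ok := m.Load("a"); ok {
		t.Fatal("expected key a to be deleted")
	}
	if vs := m.Values(); len(vs) != 1 || vs[0] != 2 {
		t.Fatalf("unexpected values: %v", vs)
	}
}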