@@ -17,19 +17,35 @@ package devicemapper
1717import (
1818 "fmt"
1919 "strings"
20- "sync"
20+	"sync/atomic"
2121 "time"
2222
2323 "k8s.io/klog/v2"
2424)
2525
// usageCache is a typed wrapper around atomic.Value that eliminates the need
// for type assertions at every call site. It stores device ID strings mapped
// to usage values (uint64).
//
// The zero value is ready to use: Load on an empty cache returns a nil map,
// which behaves like an empty map for lookups.
type usageCache struct {
	v atomic.Value
}

// Load retrieves the current cache map. It returns nil when nothing has been
// stored yet, so callers may look up keys without a separate nil check.
func (c *usageCache) Load() map[string]uint64 {
	// Comma-ok form: a plain assertion would panic when Load is called
	// before the first Store (atomic.Value starts out holding nil).
	m, _ := c.v.Load().(map[string]uint64)
	return m
}

// Store saves a new cache map. The stored map must not be mutated afterwards;
// replace it wholesale on each refresh (copy-on-write discipline).
func (c *usageCache) Store(m map[string]uint64) {
	c.v.Store(m)
}
42+
2643// ThinPoolWatcher maintains a cache of device name -> usage stats for a
2744// devicemapper thin-pool using thin_ls.
2845type ThinPoolWatcher struct {
2946 poolName string
3047 metadataDevice string
31- lock * sync.RWMutex
32- cache map [string ]uint64
48+ cache usageCache
3349 period time.Duration
3450 stopChan chan struct {}
3551 dmsetup DmsetupClient
@@ -44,15 +60,16 @@ func NewThinPoolWatcher(poolName, metadataDevice string) (*ThinPoolWatcher, erro
4460 return nil , fmt .Errorf ("encountered error creating thin_ls client: %v" , err )
4561 }
4662
47- return & ThinPoolWatcher {poolName : poolName ,
63+ w := & ThinPoolWatcher {
64+ poolName : poolName ,
4865 metadataDevice : metadataDevice ,
49- lock : & sync.RWMutex {},
50- cache : make (map [string ]uint64 ),
5166 period : 15 * time .Second ,
5267 stopChan : make (chan struct {}),
5368 dmsetup : NewDmsetupClient (),
5469 thinLsClient : thinLsClient ,
55- }, nil
70+ }
71+ w .cache .Store (map [string ]uint64 {})
72+ return w , nil
5673}
5774
5875// Start starts the ThinPoolWatcher.
@@ -87,14 +104,11 @@ func (w *ThinPoolWatcher) Stop() {
87104
88105// GetUsage gets the cached usage value of the given device.
89106func (w * ThinPoolWatcher ) GetUsage (deviceID string ) (uint64 , error ) {
90- w .lock .RLock ()
91- defer w .lock .RUnlock ()
92-
93- v , ok := w .cache [deviceID ]
107+ cache := w .cache .Load ()
108+ v , ok := cache [deviceID ]
94109 if ! ok {
95110 return 0 , fmt .Errorf ("no cached value for usage of device %v" , deviceID )
96111 }
97-
98112 return v , nil
99113}
100114
@@ -106,9 +120,6 @@ const (
106120// Refresh performs a `thin_ls` of the pool being watched and refreshes the
107121// cached data with the result.
108122func (w * ThinPoolWatcher ) Refresh () error {
109- w .lock .Lock ()
110- defer w .lock .Unlock ()
111-
112123 currentlyReserved , err := w .checkReservation (w .poolName )
113124 if err != nil {
114125 err = fmt .Errorf ("error determining whether snapshot is reserved: %v" , err )
@@ -148,7 +159,7 @@ func (w *ThinPoolWatcher) Refresh() error {
148159 return err
149160 }
150161
151- w .cache = newCache
162+ w .cache . Store ( newCache )
152163 return nil
153164}
154165
0 commit comments