
Commit 627928e

dgrisonnet authored and dims committed
devicemapper: use atomic.Value for lock-free cache reads
The ThinPoolWatcher cache is read frequently by container housekeeping goroutines (via GetUsage) while being updated every 15 seconds by Refresh(). Using atomic.Value allows lock-free reads, eliminating potential contention between the many readers and the single writer.

Signed-off-by: Damien Grisonnet <[email protected]>
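The pattern this commit applies, distilled into a standalone sketch for context (package name and device IDs below are illustrative, not from this repository): a single writer publishes a freshly built map with atomic.Value.Store, and readers call Load without taking any lock. Correctness rests on the writer never mutating a map after publishing it.

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    func main() {
        var cache atomic.Value
        cache.Store(map[string]uint64{}) // seed so Load never returns nil

        // Single writer: build a complete replacement map, then publish it
        // in one atomic swap. Readers observe either the old map or the
        // new one, never a partially updated map.
        go func() {
            for i := uint64(0); ; i++ {
                cache.Store(map[string]uint64{"device-0": i})
                time.Sleep(10 * time.Millisecond)
            }
        }()

        // Many readers: lock-free loads; the type assertion recovers the map.
        for r := 0; r < 4; r++ {
            go func() {
                for {
                    _ = cache.Load().(map[string]uint64)["device-0"]
                }
            }()
        }

        time.Sleep(100 * time.Millisecond)
        fmt.Println("last value:", cache.Load().(map[string]uint64)["device-0"])
    }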
1 parent 558703b commit 627928e

File tree

2 files changed: +27 -18 lines


devicemapper/thin_pool_watcher.go

Lines changed: 27 additions & 16 deletions
@@ -17,19 +17,35 @@ package devicemapper

 import (
     "fmt"
     "strings"
-    "sync"
+    "sync/atomic"
     "time"

     "k8s.io/klog/v2"
 )

+// usageCache is a typed wrapper around atomic.Value that eliminates the need
+// for type assertions at every call site. It stores device ID strings mapped
+// to usage values (uint64).
+type usageCache struct {
+    v atomic.Value
+}
+
+// Load retrieves the current cache map.
+func (c *usageCache) Load() map[string]uint64 {
+    return c.v.Load().(map[string]uint64)
+}
+
+// Store saves a new cache map.
+func (c *usageCache) Store(m map[string]uint64) {
+    c.v.Store(m)
+}
+
 // ThinPoolWatcher maintains a cache of device name -> usage stats for a
 // devicemapper thin-pool using thin_ls.
 type ThinPoolWatcher struct {
     poolName       string
     metadataDevice string
-    lock           *sync.RWMutex
-    cache          map[string]uint64
+    cache          usageCache
     period         time.Duration
     stopChan       chan struct{}
     dmsetup        DmsetupClient
@@ -44,15 +60,16 @@ func NewThinPoolWatcher(poolName, metadataDevice string) (*ThinPoolWatcher, erro
         return nil, fmt.Errorf("encountered error creating thin_ls client: %v", err)
     }

-    return &ThinPoolWatcher{poolName: poolName,
+    w := &ThinPoolWatcher{
+        poolName:       poolName,
         metadataDevice: metadataDevice,
-        lock:           &sync.RWMutex{},
-        cache:          make(map[string]uint64),
         period:         15 * time.Second,
         stopChan:       make(chan struct{}),
         dmsetup:        NewDmsetupClient(),
         thinLsClient:   thinLsClient,
-    }, nil
+    }
+    w.cache.Store(map[string]uint64{})
+    return w, nil
 }

 // Start starts the ThinPoolWatcher.
@@ -87,14 +104,11 @@ func (w *ThinPoolWatcher) Stop() {

 // GetUsage gets the cached usage value of the given device.
 func (w *ThinPoolWatcher) GetUsage(deviceID string) (uint64, error) {
-    w.lock.RLock()
-    defer w.lock.RUnlock()
-
-    v, ok := w.cache[deviceID]
+    cache := w.cache.Load()
+    v, ok := cache[deviceID]
     if !ok {
         return 0, fmt.Errorf("no cached value for usage of device %v", deviceID)
     }
-
     return v, nil
 }

@@ -106,9 +120,6 @@ const (
 // Refresh performs a `thin_ls` of the pool being watched and refreshes the
 // cached data with the result.
 func (w *ThinPoolWatcher) Refresh() error {
-    w.lock.Lock()
-    defer w.lock.Unlock()
-
     currentlyReserved, err := w.checkReservation(w.poolName)
     if err != nil {
         err = fmt.Errorf("error determining whether snapshot is reserved: %v", err)
@@ -148,7 +159,7 @@ func (w *ThinPoolWatcher) Refresh() error {
         return err
     }

-    w.cache = newCache
+    w.cache.Store(newCache)
     return nil
 }
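One detail worth noting from the constructor change above: atomic.Value.Load returns nil before the first Store, so the type assertion in usageCache.Load would panic on an unseeded cache. Seeding with an empty map in NewThinPoolWatcher guards against that. A minimal sketch of the failure mode (illustrative snippet, not part of the commit):

    package main

    import "sync/atomic"

    type usageCache struct{ v atomic.Value }

    func (c *usageCache) Load() map[string]uint64  { return c.v.Load().(map[string]uint64) }
    func (c *usageCache) Store(m map[string]uint64) { c.v.Store(m) }

    func main() {
        var c usageCache
        // Calling c.Load() here would panic: atomic.Value.Load returns nil
        // before the first Store, and asserting nil to map[string]uint64
        // fails.
        c.Store(map[string]uint64{}) // what NewThinPoolWatcher does up front
        _ = c.Load()                 // safe from now on
    }

The typed wrapper also enforces at compile time the atomic.Value rule that every Store must use the same concrete type.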

devicemapper/thin_pool_watcher_test.go

Lines changed: 0 additions & 2 deletions
@@ -16,7 +16,6 @@ package devicemapper

 import (
     "fmt"
-    "sync"
     "testing"
     "time"

@@ -132,7 +131,6 @@ func TestRefresh(t *testing.T) {
     watcher := &ThinPoolWatcher{
         poolName:       "test pool name",
         metadataDevice: "/dev/mapper/metadata-device",
-        lock:           &sync.RWMutex{},
         period:         15 * time.Second,
         stopChan:       make(chan struct{}),
         dmsetup:        dmsetup,
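To check the commit message's contention claim against a workload shaped like this one (many readers, one infrequent writer), a micro-benchmark along these lines could be used. This is a hypothetical _test.go sketch, not part of the commit; the package name is made up for illustration:

    package cachebench

    import (
        "sync"
        "sync/atomic"
        "testing"
    )

    // Baseline: reads guarded by an RWMutex, as before this commit.
    func BenchmarkRWMutexRead(b *testing.B) {
        var mu sync.RWMutex
        m := map[string]uint64{"device-0": 1}
        b.RunParallel(func(pb *testing.PB) {
            for pb.Next() {
                mu.RLock()
                _ = m["device-0"]
                mu.RUnlock()
            }
        })
    }

    // After: lock-free reads through atomic.Value, as in this commit.
    func BenchmarkAtomicValueRead(b *testing.B) {
        var v atomic.Value
        v.Store(map[string]uint64{"device-0": 1})
        b.RunParallel(func(pb *testing.PB) {
            for pb.Next() {
                _ = v.Load().(map[string]uint64)["device-0"]
            }
        })
    }

Running with `go test -bench . -cpu 1,4,16` should show how each variant scales as reader parallelism grows; the RWMutex variant is expected to degrade under contention while the atomic.Value loads stay close to flat.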
