diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1484a11..a64d49f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,10 +9,10 @@ jobs: test: strategy: matrix: - go-version: [ "1.24", "1.25" ] + go-version: [ "1.25", "1.26" ] runs-on: ubuntu-latest env: - GOLANGCI_LINT_VERSION: v2.4.0 + GOLANGCI_LINT_VERSION: v2.9.0 steps: - name: Checkout code diff --git a/.golangci.yml b/.golangci.yml index f6019ca..51eb4ae 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -47,6 +47,14 @@ linters: excludes: - G103 - G115 + revive: + rules: + - name: var-naming + arguments: + - [] + - [] + - - skip-package-name-checks: true + skip-package-name-collision-with-go-std: true testifylint: disable: - float-compare diff --git a/Makefile b/Makefile index 6d1daa9..2f76084 100755 --- a/Makefile +++ b/Makefile @@ -22,14 +22,6 @@ lint: @golangci-lint run ./... .PHONY: lint -# Generate Go files -generate: - @echo "==> Generating" - @go install -modfile go.tools.mod github.com/a8m/syncmap - @go generate - @echo "==> Done" -.PHONY: generate - # Run benchmarks bench: @go test -bench . ./... diff --git a/generate.go b/generate.go deleted file mode 100644 index 6cca290..0000000 --- a/generate.go +++ /dev/null @@ -1,11 +0,0 @@ -package statter - -//go:generate syncmap -pkg statter -name counterMap -o maps_counter.gen.go map[string]*Counter -//go:generate syncmap -pkg statter -name gaugeMap -o maps_gauge.gen.go map[string]*Gauge -//go:generate syncmap -pkg statter -name histogramMap -o maps_histogram.gen.go map[string]*Histogram -//go:generate syncmap -pkg statter -name timingMap -o maps_timing.gen.go map[string]*Timing - -//go:generate syncmap -pkg prometheus -name counterMap -o reporter/prometheus/maps_counter.gen.go map[string]*prometheus.CounterVec -//go:generate syncmap -pkg prometheus -name gaugeMap -o reporter/prometheus/maps_gauge.gen.go map[string]*prometheus.GaugeVec -//go:generate syncmap -pkg prometheus -name histogramMap -o reporter/prometheus/maps_histogram.gen.go map[string]*prometheus.HistogramVec -//go:generate syncmap -pkg prometheus -name bucketMap -o reporter/prometheus/maps_buckets.gen.go map[string][]float64 diff --git a/go.mod b/go.mod index 5a31527..59a9846 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,11 @@ module github.com/hamba/statter/v2 -go 1.24.0 +go 1.25.1 require ( github.com/VictoriaMetrics/metrics v1.40.2 github.com/cactus/go-statsd-client/v5 v5.1.0 + github.com/go4org/hashtriemap v0.0.0-20251130024219-545ba229f689 github.com/hamba/logger/v2 v2.9.0 github.com/prometheus/client_golang v1.23.2 github.com/stretchr/testify v1.11.1 @@ -27,7 +28,7 @@ require ( go.opentelemetry.io/otel v1.38.0 // indirect go.opentelemetry.io/otel/trace v1.38.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/sys v0.36.0 // indirect + golang.org/x/sys v0.39.0 // indirect google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index ba3c0fb..6474ad6 100644 --- a/go.sum +++ b/go.sum @@ -10,6 +10,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go4org/hashtriemap v0.0.0-20251130024219-545ba229f689 h1:0psnKZ+N2IP43/SZC8SKx6OpFJwLmQb9m9QyV9BC2f8= +github.com/go4org/hashtriemap 
v0.0.0-20251130024219-545ba229f689/go.mod h1:OGmRfY/9QEK2P5zCRtmqfbCF283xPkU2dvVA4MvbvpI= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/hamba/logger/v2 v2.9.0 h1:gLa4AuoQ17XTBovyIewOK7sALX/sHDJO3kfPUQBUA2o= @@ -53,8 +55,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/bytes/buffer.go b/internal/bytes/buffer.go index 2dd224b..0291411 100755 --- a/internal/bytes/buffer.go +++ b/internal/bytes/buffer.go @@ -15,7 +15,7 @@ type Pool struct { // NewPool creates a new instance of Pool. func NewPool(size int) Pool { return Pool{p: &sync.Pool{ - New: func() interface{} { + New: func() any { return &Buffer{b: make([]byte, 0, size)} }, }} diff --git a/internal/bytes/buffer_test.go b/internal/bytes/buffer_test.go index c55f670..8499bf0 100755 --- a/internal/bytes/buffer_test.go +++ b/internal/bytes/buffer_test.go @@ -16,10 +16,7 @@ func TestPool(t *testing.T) { var wg sync.WaitGroup for range 10 { - wg.Add(1) - go func() { - defer wg.Done() - + wg.Go(func() { for range 100 { buf := p.Get() assert.Zero(t, buf.Len(), "Expected truncated Buffer") @@ -31,7 +28,7 @@ func TestPool(t *testing.T) { p.Put(buf) } - }() + }) } wg.Wait() diff --git a/internal/stats/stats.go b/internal/stats/stats.go index 4d078c6..e7bdaa4 100644 --- a/internal/stats/stats.go +++ b/internal/stats/stats.go @@ -17,7 +17,7 @@ type Pool struct { // NewPool returns a pool. func NewPool(percLimit int) *Pool { return &Pool{p: &sync.Pool{ - New: func() interface{} { + New: func() any { return NewSample(percLimit) }, }} diff --git a/internal/stats/stats_test.go b/internal/stats/stats_test.go index 9c0036e..f1d5221 100644 --- a/internal/stats/stats_test.go +++ b/internal/stats/stats_test.go @@ -13,10 +13,7 @@ func TestPool(t *testing.T) { var wg sync.WaitGroup for range 10 { - wg.Add(1) - go func() { - defer wg.Done() - + wg.Go(func() { for range 100 { s := p.Get() @@ -29,7 +26,7 @@ func TestPool(t *testing.T) { p.Put(s) } - }() + }) } wg.Wait() @@ -87,7 +84,7 @@ func BenchmarkSample(b *testing.B) { b.ReportAllocs() b.ResetTimer() - for range b.N { + for b.Loop() { s.Add(12.34) } } diff --git a/key.go b/key.go index 1b727d6..c04b2b8 100644 --- a/key.go +++ b/key.go @@ -11,7 +11,7 @@ const ( ) var keyPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &key{b: make([]byte, 0, 256)} }, } diff --git a/maps_counter.gen.go b/maps_counter.gen.go deleted file mode 100644 index 8a9b7a7..0000000 --- a/maps_counter.gen.go +++ /dev/null @@ -1,388 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statter - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. -type counterMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[string]*entryCounterMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyCounterMap struct { - m map[string]*entryCounterMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedCounterMap = unsafe.Pointer(new(*Counter)) - -// An entry is a slot in the map corresponding to a particular key. -type entryCounterMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. 
- // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryCounterMap(i *Counter) *entryCounterMap { - return &entryCounterMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *counterMap) Load(key string) (value *Counter, ok bool) { - read, _ := m.read.Load().(readOnlyCounterMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnlyCounterMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryCounterMap) load() (value *Counter, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedCounterMap { - return value, false - } - return *(**Counter)(p), true -} - -// Store sets the value for a key. -func (m *counterMap) Store(key string, value *Counter) { - read, _ := m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyCounterMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryCounterMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryCounterMap) tryStore(i **Counter) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedCounterMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryCounterMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedCounterMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. 
-func (e *entryCounterMap) storeLocked(i **Counter) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *counterMap) LoadOrStore(key string, value *Counter) (actual *Counter, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyCounterMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryCounterMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entryCounterMap) tryLoadOrStore(i *Counter) (actual *Counter, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedCounterMap { - return actual, false, false - } - if p != nil { - return *(**Counter)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedCounterMap { - return actual, false, false - } - if p != nil { - return *(**Counter)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *counterMap) LoadAndDelete(key string) (value *Counter, loaded bool) { - read, _ := m.read.Load().(readOnlyCounterMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *counterMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryCounterMap) delete() (value *Counter, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedCounterMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(**Counter)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. 
-// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *counterMap) Range(f func(key string, value *Counter) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyCounterMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - if read.amended { - read = readOnlyCounterMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *counterMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyCounterMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *counterMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyCounterMap) - m.dirty = make(map[string]*entryCounterMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryCounterMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedCounterMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedCounterMap -} diff --git a/maps_gauge.gen.go b/maps_gauge.gen.go deleted file mode 100644 index a1293cb..0000000 --- a/maps_gauge.gen.go +++ /dev/null @@ -1,388 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statter - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. 
-type gaugeMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[string]*entryGaugeMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyGaugeMap struct { - m map[string]*entryGaugeMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedGaugeMap = unsafe.Pointer(new(*Gauge)) - -// An entry is a slot in the map corresponding to a particular key. -type entryGaugeMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryGaugeMap(i *Gauge) *entryGaugeMap { - return &entryGaugeMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *gaugeMap) Load(key string) (value *Gauge, ok bool) { - read, _ := m.read.Load().(readOnlyGaugeMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) 
- read, _ = m.read.Load().(readOnlyGaugeMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryGaugeMap) load() (value *Gauge, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedGaugeMap { - return value, false - } - return *(**Gauge)(p), true -} - -// Store sets the value for a key. -func (m *gaugeMap) Store(key string, value *Gauge) { - read, _ := m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyGaugeMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryGaugeMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryGaugeMap) tryStore(i **Gauge) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedGaugeMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryGaugeMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedGaugeMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. -func (e *entryGaugeMap) storeLocked(i **Gauge) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *gaugeMap) LoadOrStore(key string, value *Gauge) (actual *Gauge, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. 
- m.dirtyLocked() - m.read.Store(readOnlyGaugeMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryGaugeMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entryGaugeMap) tryLoadOrStore(i *Gauge) (actual *Gauge, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedGaugeMap { - return actual, false, false - } - if p != nil { - return *(**Gauge)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedGaugeMap { - return actual, false, false - } - if p != nil { - return *(**Gauge)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *gaugeMap) LoadAndDelete(key string) (value *Gauge, loaded bool) { - read, _ := m.read.Load().(readOnlyGaugeMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *gaugeMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryGaugeMap) delete() (value *Gauge, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedGaugeMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(**Gauge)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *gaugeMap) Range(f func(key string, value *Gauge) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyGaugeMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! 
- m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - if read.amended { - read = readOnlyGaugeMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *gaugeMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyGaugeMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *gaugeMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyGaugeMap) - m.dirty = make(map[string]*entryGaugeMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryGaugeMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedGaugeMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedGaugeMap -} diff --git a/maps_histogram.gen.go b/maps_histogram.gen.go deleted file mode 100644 index 85bb18a..0000000 --- a/maps_histogram.gen.go +++ /dev/null @@ -1,388 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statter - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. -type histogramMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. 
- dirty map[string]*entryHistogramMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyHistogramMap struct { - m map[string]*entryHistogramMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedHistogramMap = unsafe.Pointer(new(*Histogram)) - -// An entry is a slot in the map corresponding to a particular key. -type entryHistogramMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryHistogramMap(i *Histogram) *entryHistogramMap { - return &entryHistogramMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *histogramMap) Load(key string) (value *Histogram, ok bool) { - read, _ := m.read.Load().(readOnlyHistogramMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnlyHistogramMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryHistogramMap) load() (value *Histogram, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedHistogramMap { - return value, false - } - return *(**Histogram)(p), true -} - -// Store sets the value for a key. -func (m *histogramMap) Store(key string, value *Histogram) { - read, _ := m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. 
- m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyHistogramMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryHistogramMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryHistogramMap) tryStore(i **Histogram) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedHistogramMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryHistogramMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedHistogramMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. -func (e *entryHistogramMap) storeLocked(i **Histogram) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *histogramMap) LoadOrStore(key string, value *Histogram) (actual *Histogram, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyHistogramMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryHistogramMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entryHistogramMap) tryLoadOrStore(i *Histogram) (actual *Histogram, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedHistogramMap { - return actual, false, false - } - if p != nil { - return *(**Histogram)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. 
- ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedHistogramMap { - return actual, false, false - } - if p != nil { - return *(**Histogram)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *histogramMap) LoadAndDelete(key string) (value *Histogram, loaded bool) { - read, _ := m.read.Load().(readOnlyHistogramMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *histogramMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryHistogramMap) delete() (value *Histogram, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedHistogramMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(**Histogram)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *histogramMap) Range(f func(key string, value *Histogram) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyHistogramMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! 
- m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - if read.amended { - read = readOnlyHistogramMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *histogramMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyHistogramMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *histogramMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyHistogramMap) - m.dirty = make(map[string]*entryHistogramMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryHistogramMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedHistogramMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedHistogramMap -} diff --git a/maps_timing.gen.go b/maps_timing.gen.go deleted file mode 100644 index ba626b0..0000000 --- a/maps_timing.gen.go +++ /dev/null @@ -1,388 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package statter - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. -type timingMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. 
- dirty map[string]*entryTimingMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyTimingMap struct { - m map[string]*entryTimingMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedTimingMap = unsafe.Pointer(new(*Timing)) - -// An entry is a slot in the map corresponding to a particular key. -type entryTimingMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryTimingMap(i *Timing) *entryTimingMap { - return &entryTimingMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *timingMap) Load(key string) (value *Timing, ok bool) { - read, _ := m.read.Load().(readOnlyTimingMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnlyTimingMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryTimingMap) load() (value *Timing, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedTimingMap { - return value, false - } - return *(**Timing)(p), true -} - -// Store sets the value for a key. -func (m *timingMap) Store(key string, value *Timing) { - read, _ := m.read.Load().(readOnlyTimingMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyTimingMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. 
- m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyTimingMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryTimingMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryTimingMap) tryStore(i **Timing) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedTimingMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryTimingMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedTimingMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. -func (e *entryTimingMap) storeLocked(i **Timing) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *timingMap) LoadOrStore(key string, value *Timing) (actual *Timing, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyTimingMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyTimingMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyTimingMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryTimingMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entryTimingMap) tryLoadOrStore(i *Timing) (actual *Timing, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedTimingMap { - return actual, false, false - } - if p != nil { - return *(**Timing)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedTimingMap { - return actual, false, false - } - if p != nil { - return *(**Timing)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. 
-func (m *timingMap) LoadAndDelete(key string) (value *Timing, loaded bool) { - read, _ := m.read.Load().(readOnlyTimingMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyTimingMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *timingMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryTimingMap) delete() (value *Timing, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedTimingMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(**Timing)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *timingMap) Range(f func(key string, value *Timing) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyTimingMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! - m.mu.Lock() - read, _ = m.read.Load().(readOnlyTimingMap) - if read.amended { - read = readOnlyTimingMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *timingMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyTimingMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *timingMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyTimingMap) - m.dirty = make(map[string]*entryTimingMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryTimingMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedTimingMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedTimingMap -} diff --git a/reporter/prometheus/maps_buckets.gen.go b/reporter/prometheus/maps_buckets.gen.go deleted file mode 100644 index da396c3..0000000 --- a/reporter/prometheus/maps_buckets.gen.go +++ /dev/null @@ -1,388 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package prometheus - -import ( - "sync" - "sync/atomic" - "unsafe" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. -type bucketMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[string]*entryBucketMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyBucketMap struct { - m map[string]*entryBucketMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedBucketMap = unsafe.Pointer(new([]float64)) - -// An entry is a slot in the map corresponding to a particular key. -type entryBucketMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. 
- // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryBucketMap(i []float64) *entryBucketMap { - return &entryBucketMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *bucketMap) Load(key string) (value []float64, ok bool) { - read, _ := m.read.Load().(readOnlyBucketMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnlyBucketMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryBucketMap) load() (value []float64, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedBucketMap { - return value, false - } - return *(*[]float64)(p), true -} - -// Store sets the value for a key. -func (m *bucketMap) Store(key string, value []float64) { - read, _ := m.read.Load().(readOnlyBucketMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyBucketMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyBucketMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryBucketMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryBucketMap) tryStore(i *[]float64) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedBucketMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryBucketMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedBucketMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. 
-func (e *entryBucketMap) storeLocked(i *[]float64) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *bucketMap) LoadOrStore(key string, value []float64) (actual []float64, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyBucketMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyBucketMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyBucketMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryBucketMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entryBucketMap) tryLoadOrStore(i []float64) (actual []float64, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedBucketMap { - return actual, false, false - } - if p != nil { - return *(*[]float64)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedBucketMap { - return actual, false, false - } - if p != nil { - return *(*[]float64)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *bucketMap) LoadAndDelete(key string) (value []float64, loaded bool) { - read, _ := m.read.Load().(readOnlyBucketMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyBucketMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *bucketMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryBucketMap) delete() (value []float64, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedBucketMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(*[]float64)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. 
-// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *bucketMap) Range(f func(key string, value []float64) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyBucketMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! - m.mu.Lock() - read, _ = m.read.Load().(readOnlyBucketMap) - if read.amended { - read = readOnlyBucketMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *bucketMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyBucketMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *bucketMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyBucketMap) - m.dirty = make(map[string]*entryBucketMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryBucketMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedBucketMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedBucketMap -} diff --git a/reporter/prometheus/maps_counter.gen.go b/reporter/prometheus/maps_counter.gen.go deleted file mode 100644 index 769a46c..0000000 --- a/reporter/prometheus/maps_counter.gen.go +++ /dev/null @@ -1,390 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package prometheus - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/prometheus/client_golang/prometheus" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. 
A Map must not be copied after first use. -type counterMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. - // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[string]*entryCounterMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyCounterMap struct { - m map[string]*entryCounterMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedCounterMap = unsafe.Pointer(new(*prometheus.CounterVec)) - -// An entry is a slot in the map corresponding to a particular key. -type entryCounterMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryCounterMap(i *prometheus.CounterVec) *entryCounterMap { - return &entryCounterMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *counterMap) Load(key string) (value *prometheus.CounterVec, ok bool) { - read, _ := m.read.Load().(readOnlyCounterMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. 
(If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnlyCounterMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryCounterMap) load() (value *prometheus.CounterVec, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedCounterMap { - return value, false - } - return *(**prometheus.CounterVec)(p), true -} - -// Store sets the value for a key. -func (m *counterMap) Store(key string, value *prometheus.CounterVec) { - read, _ := m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyCounterMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryCounterMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryCounterMap) tryStore(i **prometheus.CounterVec) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedCounterMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryCounterMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedCounterMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. -func (e *entryCounterMap) storeLocked(i **prometheus.CounterVec) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *counterMap) LoadOrStore(key string, value *prometheus.CounterVec) (actual *prometheus.CounterVec, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. 
- m.dirtyLocked() - m.read.Store(readOnlyCounterMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryCounterMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. -func (e *entryCounterMap) tryLoadOrStore(i *prometheus.CounterVec) (actual *prometheus.CounterVec, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedCounterMap { - return actual, false, false - } - if p != nil { - return *(**prometheus.CounterVec)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedCounterMap { - return actual, false, false - } - if p != nil { - return *(**prometheus.CounterVec)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *counterMap) LoadAndDelete(key string) (value *prometheus.CounterVec, loaded bool) { - read, _ := m.read.Load().(readOnlyCounterMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *counterMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryCounterMap) delete() (value *prometheus.CounterVec, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedCounterMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(**prometheus.CounterVec)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *counterMap) Range(f func(key string, value *prometheus.CounterVec) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyCounterMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! 
- m.mu.Lock() - read, _ = m.read.Load().(readOnlyCounterMap) - if read.amended { - read = readOnlyCounterMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *counterMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyCounterMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *counterMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyCounterMap) - m.dirty = make(map[string]*entryCounterMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryCounterMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedCounterMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedCounterMap -} diff --git a/reporter/prometheus/maps_gauge.gen.go b/reporter/prometheus/maps_gauge.gen.go deleted file mode 100644 index 5a325db..0000000 --- a/reporter/prometheus/maps_gauge.gen.go +++ /dev/null @@ -1,390 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package prometheus - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/prometheus/client_golang/prometheus" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. -type gaugeMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. 
- // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[string]*entryGaugeMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyGaugeMap struct { - m map[string]*entryGaugeMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedGaugeMap = unsafe.Pointer(new(*prometheus.GaugeVec)) - -// An entry is a slot in the map corresponding to a particular key. -type entryGaugeMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryGaugeMap(i *prometheus.GaugeVec) *entryGaugeMap { - return &entryGaugeMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *gaugeMap) Load(key string) (value *prometheus.GaugeVec, ok bool) { - read, _ := m.read.Load().(readOnlyGaugeMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnlyGaugeMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryGaugeMap) load() (value *prometheus.GaugeVec, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedGaugeMap { - return value, false - } - return *(**prometheus.GaugeVec)(p), true -} - -// Store sets the value for a key. 
-func (m *gaugeMap) Store(key string, value *prometheus.GaugeVec) { - read, _ := m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyGaugeMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryGaugeMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryGaugeMap) tryStore(i **prometheus.GaugeVec) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedGaugeMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryGaugeMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedGaugeMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. -func (e *entryGaugeMap) storeLocked(i **prometheus.GaugeVec) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *gaugeMap) LoadOrStore(key string, value *prometheus.GaugeVec) (actual *prometheus.GaugeVec, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyGaugeMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryGaugeMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. 
-func (e *entryGaugeMap) tryLoadOrStore(i *prometheus.GaugeVec) (actual *prometheus.GaugeVec, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedGaugeMap { - return actual, false, false - } - if p != nil { - return *(**prometheus.GaugeVec)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedGaugeMap { - return actual, false, false - } - if p != nil { - return *(**prometheus.GaugeVec)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *gaugeMap) LoadAndDelete(key string) (value *prometheus.GaugeVec, loaded bool) { - read, _ := m.read.Load().(readOnlyGaugeMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *gaugeMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryGaugeMap) delete() (value *prometheus.GaugeVec, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedGaugeMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(**prometheus.GaugeVec)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *gaugeMap) Range(f func(key string, value *prometheus.GaugeVec) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyGaugeMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! 
- m.mu.Lock() - read, _ = m.read.Load().(readOnlyGaugeMap) - if read.amended { - read = readOnlyGaugeMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *gaugeMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyGaugeMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *gaugeMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyGaugeMap) - m.dirty = make(map[string]*entryGaugeMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryGaugeMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedGaugeMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedGaugeMap -} diff --git a/reporter/prometheus/maps_histogram.gen.go b/reporter/prometheus/maps_histogram.gen.go deleted file mode 100644 index 36af4f1..0000000 --- a/reporter/prometheus/maps_histogram.gen.go +++ /dev/null @@ -1,390 +0,0 @@ -// Code generated by syncmap; DO NOT EDIT. - -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package prometheus - -import ( - "sync" - "sync/atomic" - "unsafe" - - "github.com/prometheus/client_golang/prometheus" -) - -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use -// by multiple goroutines without additional locking or coordination. -// Loads, stores, and deletes run in amortized constant time. -// -// The Map type is specialized. Most code should use a plain Go map instead, -// with separate locking or coordination, for better type safety and to make it -// easier to maintain other invariants along with the map content. -// -// The Map type is optimized for two common use cases: (1) when the entry for a given -// key is only ever written once but read many times, as in caches that only grow, -// or (2) when multiple goroutines read, write, and overwrite entries for disjoint -// sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. -// -// The zero Map is empty and ready for use. A Map must not be copied after first use. -type histogramMap struct { - mu sync.Mutex - - // read contains the portion of the map's contents that are safe for - // concurrent access (with or without mu held). - // - // The read field itself is always safe to load, but must only be stored with - // mu held. - // - // Entries stored in read may be updated concurrently without mu, but updating - // a previously-expunged entry requires that the entry be copied to the dirty - // map and unexpunged with mu held. - read atomic.Value // readOnly - - // dirty contains the portion of the map's contents that require mu to be - // held. To ensure that the dirty map can be promoted to the read map quickly, - // it also includes all of the non-expunged entries in the read map. - // - // Expunged entries are not stored in the dirty map. An expunged entry in the - // clean map must be unexpunged and added to the dirty map before a new value - // can be stored to it. 
- // - // If the dirty map is nil, the next write to the map will initialize it by - // making a shallow copy of the clean map, omitting stale entries. - dirty map[string]*entryHistogramMap - - // misses counts the number of loads since the read map was last updated that - // needed to lock mu to determine whether the key was present. - // - // Once enough misses have occurred to cover the cost of copying the dirty - // map, the dirty map will be promoted to the read map (in the unamended - // state) and the next store to the map will make a new dirty copy. - misses int -} - -// readOnly is an immutable struct stored atomically in the Map.read field. -type readOnlyHistogramMap struct { - m map[string]*entryHistogramMap - amended bool // true if the dirty map contains some key not in m. -} - -// expunged is an arbitrary pointer that marks entries which have been deleted -// from the dirty map. -var expungedHistogramMap = unsafe.Pointer(new(*prometheus.HistogramVec)) - -// An entry is a slot in the map corresponding to a particular key. -type entryHistogramMap struct { - // p points to the interface{} value stored for the entry. - // - // If p == nil, the entry has been deleted, and either m.dirty == nil or - // m.dirty[key] is e. - // - // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry - // is missing from m.dirty. - // - // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty - // != nil, in m.dirty[key]. - // - // An entry can be deleted by atomic replacement with nil: when m.dirty is - // next created, it will atomically replace nil with expunged and leave - // m.dirty[key] unset. - // - // An entry's associated value can be updated by atomic replacement, provided - // p != expunged. If p == expunged, an entry's associated value can be updated - // only after first setting m.dirty[key] = e so that lookups using the dirty - // map find the entry. - p unsafe.Pointer // *interface{} -} - -func newEntryHistogramMap(i *prometheus.HistogramVec) *entryHistogramMap { - return &entryHistogramMap{p: unsafe.Pointer(&i)} -} - -// Load returns the value stored in the map for a key, or nil if no -// value is present. -// The ok result indicates whether value was found in the map. -func (m *histogramMap) Load(key string) (value *prometheus.HistogramVec, ok bool) { - read, _ := m.read.Load().(readOnlyHistogramMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - // Avoid reporting a spurious miss if m.dirty got promoted while we were - // blocked on m.mu. (If further loads of the same key will not miss, it's - // not worth copying the dirty map for this key.) - read, _ = m.read.Load().(readOnlyHistogramMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if !ok { - return value, false - } - return e.load() -} - -func (e *entryHistogramMap) load() (value *prometheus.HistogramVec, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedHistogramMap { - return value, false - } - return *(**prometheus.HistogramVec)(p), true -} - -// Store sets the value for a key. 
-func (m *histogramMap) Store(key string, value *prometheus.HistogramVec) { - read, _ := m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok && e.tryStore(&value) { - return - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - // The entry was previously expunged, which implies that there is a - // non-nil dirty map and this entry is not in it. - m.dirty[key] = e - } - e.storeLocked(&value) - } else if e, ok := m.dirty[key]; ok { - e.storeLocked(&value) - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyHistogramMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryHistogramMap(value) - } - m.mu.Unlock() -} - -// tryStore stores a value if the entry has not been expunged. -// -// If the entry is expunged, tryStore returns false and leaves the entry -// unchanged. -func (e *entryHistogramMap) tryStore(i **prometheus.HistogramVec) bool { - for { - p := atomic.LoadPointer(&e.p) - if p == expungedHistogramMap { - return false - } - if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { - return true - } - } -} - -// unexpungeLocked ensures that the entry is not marked as expunged. -// -// If the entry was previously expunged, it must be added to the dirty map -// before m.mu is unlocked. -func (e *entryHistogramMap) unexpungeLocked() (wasExpunged bool) { - return atomic.CompareAndSwapPointer(&e.p, expungedHistogramMap, nil) -} - -// storeLocked unconditionally stores a value to the entry. -// -// The entry must be known not to be expunged. -func (e *entryHistogramMap) storeLocked(i **prometheus.HistogramVec) { - atomic.StorePointer(&e.p, unsafe.Pointer(i)) -} - -// LoadOrStore returns the existing value for the key if present. -// Otherwise, it stores and returns the given value. -// The loaded result is true if the value was loaded, false if stored. -func (m *histogramMap) LoadOrStore(key string, value *prometheus.HistogramVec) (actual *prometheus.HistogramVec, loaded bool) { - // Avoid locking if it's a clean hit. - read, _ := m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok { - actual, loaded, ok := e.tryLoadOrStore(value) - if ok { - return actual, loaded - } - } - - m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - if e, ok := read.m[key]; ok { - if e.unexpungeLocked() { - m.dirty[key] = e - } - actual, loaded, _ = e.tryLoadOrStore(value) - } else if e, ok := m.dirty[key]; ok { - actual, loaded, _ = e.tryLoadOrStore(value) - m.missLocked() - } else { - if !read.amended { - // We're adding the first new key to the dirty map. - // Make sure it is allocated and mark the read-only map as incomplete. - m.dirtyLocked() - m.read.Store(readOnlyHistogramMap{m: read.m, amended: true}) - } - m.dirty[key] = newEntryHistogramMap(value) - actual, loaded = value, false - } - m.mu.Unlock() - - return actual, loaded -} - -// tryLoadOrStore atomically loads or stores a value if the entry is not -// expunged. -// -// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and -// returns with ok==false. 
-func (e *entryHistogramMap) tryLoadOrStore(i *prometheus.HistogramVec) (actual *prometheus.HistogramVec, loaded, ok bool) { - p := atomic.LoadPointer(&e.p) - if p == expungedHistogramMap { - return actual, false, false - } - if p != nil { - return *(**prometheus.HistogramVec)(p), true, true - } - - // Copy the interface after the first load to make this method more amenable - // to escape analysis: if we hit the "load" path or the entry is expunged, we - // shouldn't bother heap-allocating. - ic := i - for { - if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { - return i, false, true - } - p = atomic.LoadPointer(&e.p) - if p == expungedHistogramMap { - return actual, false, false - } - if p != nil { - return *(**prometheus.HistogramVec)(p), true, true - } - } -} - -// LoadAndDelete deletes the value for a key, returning the previous value if any. -// The loaded result reports whether the key was present. -func (m *histogramMap) LoadAndDelete(key string) (value *prometheus.HistogramVec, loaded bool) { - read, _ := m.read.Load().(readOnlyHistogramMap) - e, ok := read.m[key] - if !ok && read.amended { - m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - e, ok = read.m[key] - if !ok && read.amended { - e, ok = m.dirty[key] - delete(m.dirty, key) - // Regardless of whether the entry was present, record a miss: this key - // will take the slow path until the dirty map is promoted to the read - // map. - m.missLocked() - } - m.mu.Unlock() - } - if ok { - return e.delete() - } - return value, false -} - -// Delete deletes the value for a key. -func (m *histogramMap) Delete(key string) { - m.LoadAndDelete(key) -} - -func (e *entryHistogramMap) delete() (value *prometheus.HistogramVec, ok bool) { - for { - p := atomic.LoadPointer(&e.p) - if p == nil || p == expungedHistogramMap { - return value, false - } - if atomic.CompareAndSwapPointer(&e.p, p, nil) { - return *(**prometheus.HistogramVec)(p), true - } - } -} - -// Range calls f sequentially for each key and value present in the map. -// If f returns false, range stops the iteration. -// -// Range does not necessarily correspond to any consistent snapshot of the Map's -// contents: no key will be visited more than once, but if the value for any key -// is stored or deleted concurrently, Range may reflect any mapping for that key -// from any point during the Range call. -// -// Range may be O(N) with the number of elements in the map even if f returns -// false after a constant number of calls. -func (m *histogramMap) Range(f func(key string, value *prometheus.HistogramVec) bool) { - // We need to be able to iterate over all of the keys that were already - // present at the start of the call to Range. - // If read.amended is false, then read.m satisfies that property without - // requiring us to hold m.mu for a long time. - read, _ := m.read.Load().(readOnlyHistogramMap) - if read.amended { - // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) - // (assuming the caller does not break out early), so a call to Range - // amortizes an entire copy of the map: we can promote the dirty copy - // immediately! 
- m.mu.Lock() - read, _ = m.read.Load().(readOnlyHistogramMap) - if read.amended { - read = readOnlyHistogramMap{m: m.dirty} - m.read.Store(read) - m.dirty = nil - m.misses = 0 - } - m.mu.Unlock() - } - - for k, e := range read.m { - v, ok := e.load() - if !ok { - continue - } - if !f(k, v) { - break - } - } -} - -func (m *histogramMap) missLocked() { - m.misses++ - if m.misses < len(m.dirty) { - return - } - m.read.Store(readOnlyHistogramMap{m: m.dirty}) - m.dirty = nil - m.misses = 0 -} - -func (m *histogramMap) dirtyLocked() { - if m.dirty != nil { - return - } - - read, _ := m.read.Load().(readOnlyHistogramMap) - m.dirty = make(map[string]*entryHistogramMap, len(read.m)) - for k, e := range read.m { - if !e.tryExpungeLocked() { - m.dirty[k] = e - } - } -} - -func (e *entryHistogramMap) tryExpungeLocked() (isExpunged bool) { - p := atomic.LoadPointer(&e.p) - for p == nil { - if atomic.CompareAndSwapPointer(&e.p, nil, expungedHistogramMap) { - return true - } - p = atomic.LoadPointer(&e.p) - } - return p == expungedHistogramMap -} diff --git a/reporter/prometheus/prometheus.go b/reporter/prometheus/prometheus.go index 112ec3c..66cd768 100644 --- a/reporter/prometheus/prometheus.go +++ b/reporter/prometheus/prometheus.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/go4org/hashtriemap" "github.com/hamba/statter/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -29,13 +30,13 @@ type Prometheus struct { fqn *fqn defBuckets []float64 - buckets bucketMap + buckets hashtriemap.HashTrieMap[string, []float64] reg *prometheus.Registry - counters counterMap - gauges gaugeMap - histograms histogramMap - timings histogramMap + counters hashtriemap.HashTrieMap[string, *prometheus.CounterVec] + gauges hashtriemap.HashTrieMap[string, *prometheus.GaugeVec] + histograms hashtriemap.HashTrieMap[string, *prometheus.HistogramVec] + timings hashtriemap.HashTrieMap[string, *prometheus.HistogramVec] } // New returns a new prometheus reporter. diff --git a/statter.go b/statter.go index ae6fb8a..31cacef 100644 --- a/statter.go +++ b/statter.go @@ -8,6 +8,7 @@ import ( "sync/atomic" "time" + "github.com/go4org/hashtriemap" "github.com/hamba/statter/v2/internal/stats" ) @@ -118,10 +119,10 @@ type Statter struct { prefix string tags []Tag - counters counterMap - gauges gaugeMap - histograms histogramMap - timings timingMap + counters hashtriemap.HashTrieMap[string, *Counter] + gauges hashtriemap.HashTrieMap[string, *Gauge] + histograms hashtriemap.HashTrieMap[string, *Histogram] + timings hashtriemap.HashTrieMap[string, *Timing] } // New returns a statter. @@ -349,14 +350,13 @@ func (s *Statter) reportSample(name, suffix string, tags [][2]string, sample *st func (s *Statter) sampleKeys(name, suffix string) []string { prefix := name + "_" - keys := []string{ - prefix + "count", - prefix + "sum" + suffix, - prefix + "mean" + suffix, - prefix + "stddev" + suffix, - prefix + "min" + suffix, - prefix + "max" + suffix, - } + keys := make([]string, 0, 6+len(s.cfg.percentiles)) + keys = append(keys, prefix+"count") + keys = append(keys, prefix+"sum"+suffix) + keys = append(keys, prefix+"mean"+suffix) + keys = append(keys, prefix+"stddev"+suffix) + keys = append(keys, prefix+"min"+suffix) + keys = append(keys, prefix+"max"+suffix) for _, p := range s.cfg.percentiles { keys = append(keys, prefix+strconv.FormatFloat(p, 'g', -1, 64)+"p"+suffix)
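The struct fields changed in reporter/prometheus/prometheus.go and statter.go replace the per-type generated syncmap containers with the generic HashTrieMap from github.com/go4org/hashtriemap, keyed by the same string keys. The removed generated types exposed a sync.Map-shaped method set (Load, Store, LoadOrStore, LoadAndDelete, Delete, Range), and the drop-in field swap suggests the replacement offers an equivalent generic method set with a zero value that is ready for use; that method set is an assumption here, not something this diff confirms. A minimal sketch of the call-site pattern under that assumption, with illustrative names rather than ones taken from the package:

// Sketch only. Assumes github.com/go4org/hashtriemap exposes a generic,
// sync.Map-shaped API (Load, LoadOrStore, Range) whose zero value is
// usable without initialization; the types and names below are illustrative.
package example

import "github.com/go4org/hashtriemap"

// Counter stands in for the statter Counter type.
type Counter struct{ value int64 }

type registry struct {
	counters hashtriemap.HashTrieMap[string, *Counter]
}

// counter returns the Counter for key, creating it at most once even
// when called concurrently for the same key.
func (r *registry) counter(key string) *Counter {
	// Fast path: most lookups hit an existing key.
	if c, ok := r.counters.Load(key); ok {
		return c
	}
	// Slow path: publish a new Counter; if another goroutine stored one
	// first, LoadOrStore returns that winner instead.
	c, _ := r.counters.LoadOrStore(key, &Counter{})
	return c
}

// each visits every stored Counter, stopping early if f returns false.
func (r *registry) each(f func(key string, c *Counter) bool) {
	r.counters.Range(f)
}

Compared with the deleted syncmap output, the type parameters make one generic container serve every value type, removing the roughly 390-line generated file per map along with the go:generate step that produced them. Separately, the statter.go hunk at the end preallocates the sampleKeys slice with capacity for the six fixed aggregate keys plus one entry per configured percentile, so the percentile appends never grow the backing array.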