diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/minmax.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/minmax.go
deleted file mode 100644
index 95bd9c55b..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/fsm/minmax.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fsm
-
-// min and max implementation for integer
-
-func min(x, y int) int {
- if x < y {
- return x
- }
- return y
-}
-
-func max(x, y int) int {
- if x > y {
- return x
- }
- return y
-}
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go
deleted file mode 100644
index 1471444e3..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import (
- "fmt"
- "io/ioutil"
- "regexp"
- "sync"
- "time"
-
- "github.com/go-kit/log"
- "github.com/prometheus/client_golang/prometheus"
- "gopkg.in/yaml.v2"
-
- "github.com/prometheus/statsd_exporter/pkg/level"
- "github.com/prometheus/statsd_exporter/pkg/mapper/fsm"
-)
-
-var (
- // The first segment of a match cannot start with a number
- statsdMetricRE = `[a-zA-Z_]([a-zA-Z0-9_\-])*`
- // The subsequent segments of a match can start with a number
- // See https://github.com/prometheus/statsd_exporter/issues/328
- statsdMetricSubsequentRE = `[a-zA-Z0-9_]([a-zA-Z0-9_\-])*`
- templateReplaceRE = `(\$\{?\d+\}?)`
-
- metricLineRE = regexp.MustCompile(`^(\*|` + statsdMetricRE + `)(\.\*|\.` + statsdMetricSubsequentRE + `)*$`)
- metricNameRE = regexp.MustCompile(`^([a-zA-Z_]|` + templateReplaceRE + `)([a-zA-Z0-9_]|` + templateReplaceRE + `)*$`)
- labelNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]+$`)
-)
-
-type MetricMapper struct {
- Registerer prometheus.Registerer
- Defaults mapperConfigDefaults `yaml:"defaults"`
- Mappings []MetricMapping `yaml:"mappings"`
- FSM *fsm.FSM
- doFSM bool
- doRegex bool
- cache MetricMapperCache
- mutex sync.RWMutex
-
- MappingsCount prometheus.Gauge
-
- Logger log.Logger
-}
-
-type SummaryOptions struct {
- Quantiles []metricObjective `yaml:"quantiles"`
- MaxAge time.Duration `yaml:"max_age"`
- AgeBuckets uint32 `yaml:"age_buckets"`
- BufCap uint32 `yaml:"buf_cap"`
-}
-
-type HistogramOptions struct {
- Buckets []float64 `yaml:"buckets"`
-}
-
-type metricObjective struct {
- Quantile float64 `yaml:"quantile"`
- Error float64 `yaml:"error"`
-}
-
-var defaultQuantiles = []metricObjective{
- {Quantile: 0.5, Error: 0.05},
- {Quantile: 0.9, Error: 0.01},
- {Quantile: 0.99, Error: 0.001},
-}
-
-func (m *MetricMapper) InitFromYAMLString(fileContents string) error {
- var n MetricMapper
-
- if err := yaml.Unmarshal([]byte(fileContents), &n); err != nil {
- return err
- }
-
- if len(n.Defaults.HistogramOptions.Buckets) == 0 {
- n.Defaults.HistogramOptions.Buckets = prometheus.DefBuckets
- }
-
- if len(n.Defaults.SummaryOptions.Quantiles) == 0 {
- n.Defaults.SummaryOptions.Quantiles = defaultQuantiles
- }
-
- if n.Defaults.MatchType == MatchTypeDefault {
- n.Defaults.MatchType = MatchTypeGlob
- }
-
- remainingMappingsCount := len(n.Mappings)
-
- n.FSM = fsm.NewFSM([]string{string(MetricTypeCounter), string(MetricTypeGauge), string(MetricTypeObserver)},
- remainingMappingsCount, n.Defaults.GlobDisableOrdering)
-
- for i := range n.Mappings {
- remainingMappingsCount--
-
- currentMapping := &n.Mappings[i]
-
- // check that label is correct
- for k := range currentMapping.Labels {
- if !labelNameRE.MatchString(k) {
- return fmt.Errorf("invalid label key: %s", k)
- }
- }
-
- if currentMapping.Name == "" {
- return fmt.Errorf("line %d: metric mapping didn't set a metric name", i)
- }
-
- if !metricNameRE.MatchString(currentMapping.Name) {
- return fmt.Errorf("metric name '%s' doesn't match regex '%s'", currentMapping.Name, metricNameRE)
- }
-
- if currentMapping.MatchType == "" {
- currentMapping.MatchType = n.Defaults.MatchType
- }
-
- if currentMapping.Action == "" {
- currentMapping.Action = ActionTypeMap
- }
-
- if currentMapping.MatchType == MatchTypeGlob {
- n.doFSM = true
- if !metricLineRE.MatchString(currentMapping.Match) {
- return fmt.Errorf("invalid match: %s", currentMapping.Match)
- }
-
- captureCount := n.FSM.AddState(currentMapping.Match, string(currentMapping.MatchMetricType),
- remainingMappingsCount, currentMapping)
-
- currentMapping.nameFormatter = fsm.NewTemplateFormatter(currentMapping.Name, captureCount)
-
- labelKeys := make([]string, len(currentMapping.Labels))
- labelFormatters := make([]*fsm.TemplateFormatter, len(currentMapping.Labels))
- labelIndex := 0
- for label, valueExpr := range currentMapping.Labels {
- labelKeys[labelIndex] = label
- labelFormatters[labelIndex] = fsm.NewTemplateFormatter(valueExpr, captureCount)
- labelIndex++
- }
- currentMapping.labelFormatters = labelFormatters
- currentMapping.labelKeys = labelKeys
- } else {
- if regex, err := regexp.Compile(currentMapping.Match); err != nil {
- return fmt.Errorf("invalid regex %s in mapping: %v", currentMapping.Match, err)
- } else {
- currentMapping.regex = regex
- }
- n.doRegex = true
- }
-
- if currentMapping.ObserverType == "" {
- currentMapping.ObserverType = n.Defaults.ObserverType
- }
-
- if currentMapping.LegacyQuantiles != nil &&
- (currentMapping.SummaryOptions == nil || currentMapping.SummaryOptions.Quantiles != nil) {
- level.Warn(m.Logger).Log("msg", "using the top level quantiles is deprecated. Please use quantiles in the summary_options hierarchy")
- }
-
- if currentMapping.LegacyBuckets != nil &&
- (currentMapping.HistogramOptions == nil || currentMapping.HistogramOptions.Buckets != nil) {
- level.Warn(m.Logger).Log("msg", "using the top level buckets is deprecated. Please use buckets in the histogram_options hierarchy")
- }
-
- if currentMapping.SummaryOptions != nil &&
- currentMapping.LegacyQuantiles != nil &&
- currentMapping.SummaryOptions.Quantiles != nil {
- return fmt.Errorf("cannot use quantiles in both the top level and summary options at the same time in %s", currentMapping.Match)
- }
-
- if currentMapping.HistogramOptions != nil &&
- currentMapping.LegacyBuckets != nil &&
- currentMapping.HistogramOptions.Buckets != nil {
- return fmt.Errorf("cannot use buckets in both the top level and histogram options at the same time in %s", currentMapping.Match)
- }
-
- if currentMapping.ObserverType == ObserverTypeHistogram {
- if currentMapping.SummaryOptions != nil {
- return fmt.Errorf("cannot use histogram observer and summary options at the same time")
- }
- if currentMapping.HistogramOptions == nil {
- currentMapping.HistogramOptions = &HistogramOptions{}
- }
- if currentMapping.LegacyBuckets != nil && len(currentMapping.LegacyBuckets) != 0 {
- currentMapping.HistogramOptions.Buckets = currentMapping.LegacyBuckets
- }
- if currentMapping.HistogramOptions.Buckets == nil || len(currentMapping.HistogramOptions.Buckets) == 0 {
- currentMapping.HistogramOptions.Buckets = n.Defaults.HistogramOptions.Buckets
- }
- }
-
- if currentMapping.ObserverType == ObserverTypeSummary {
- if currentMapping.HistogramOptions != nil {
- return fmt.Errorf("cannot use summary observer and histogram options at the same time")
- }
- if currentMapping.SummaryOptions == nil {
- currentMapping.SummaryOptions = &SummaryOptions{}
- }
- if currentMapping.LegacyQuantiles != nil && len(currentMapping.LegacyQuantiles) != 0 {
- currentMapping.SummaryOptions.Quantiles = currentMapping.LegacyQuantiles
- }
- if currentMapping.SummaryOptions.Quantiles == nil || len(currentMapping.SummaryOptions.Quantiles) == 0 {
- currentMapping.SummaryOptions.Quantiles = n.Defaults.SummaryOptions.Quantiles
- }
- if currentMapping.SummaryOptions.MaxAge == 0 {
- currentMapping.SummaryOptions.MaxAge = n.Defaults.SummaryOptions.MaxAge
- }
- if currentMapping.SummaryOptions.AgeBuckets == 0 {
- currentMapping.SummaryOptions.AgeBuckets = n.Defaults.SummaryOptions.AgeBuckets
- }
- if currentMapping.SummaryOptions.BufCap == 0 {
- currentMapping.SummaryOptions.BufCap = n.Defaults.SummaryOptions.BufCap
- }
- }
-
- if currentMapping.Ttl == 0 && n.Defaults.Ttl > 0 {
- currentMapping.Ttl = n.Defaults.Ttl
- }
- }
-
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- m.Defaults = n.Defaults
- m.Mappings = n.Mappings
-
- // Reset the cache since this function can be used to reload config
- if m.cache != nil {
- m.cache.Reset()
- }
-
- if n.doFSM {
- var mappings []string
- for _, mapping := range n.Mappings {
- if mapping.MatchType == MatchTypeGlob {
- mappings = append(mappings, mapping.Match)
- }
- }
- n.FSM.BacktrackingNeeded = fsm.TestIfNeedBacktracking(mappings, n.FSM.OrderingDisabled, m.Logger)
-
- m.FSM = n.FSM
- m.doRegex = n.doRegex
- }
- m.doFSM = n.doFSM
-
- if m.MappingsCount != nil {
- m.MappingsCount.Set(float64(len(n.Mappings)))
- }
-
- if m.Logger == nil {
- m.Logger = log.NewNopLogger()
- }
-
- return nil
-}
-
-func (m *MetricMapper) InitFromFile(fileName string) error {
- mappingStr, err := ioutil.ReadFile(fileName)
- if err != nil {
- return err
- }
-
- return m.InitFromYAMLString(string(mappingStr))
-}
-
-// UseCache tells the mapper to use a cache that implements the MetricMapperCache interface.
-// This cache MUST be thread-safe!
-func (m *MetricMapper) UseCache(cache MetricMapperCache) {
- m.mutex.Lock()
- defer m.mutex.Unlock()
- m.cache = cache
-}
-
-func (m *MetricMapper) GetMapping(statsdMetric string, statsdMetricType MetricType) (*MetricMapping, prometheus.Labels, bool) {
- m.mutex.RLock()
- defer m.mutex.RUnlock()
-
- // only use a cache if one is present
- if m.cache != nil {
- result, cached := m.cache.Get(formatKey(statsdMetric, statsdMetricType))
- if cached {
- r := result.(MetricMapperCacheResult)
- return r.Mapping, r.Labels, r.Matched
- }
- }
-
- // glob matching
- if m.doFSM {
- finalState, captures := m.FSM.GetMapping(statsdMetric, string(statsdMetricType))
- if finalState != nil && finalState.Result != nil {
- v := finalState.Result.(*MetricMapping)
- result := copyMetricMapping(v)
- result.Name = result.nameFormatter.Format(captures)
-
- labels := prometheus.Labels{}
- for index, formatter := range result.labelFormatters {
- labels[result.labelKeys[index]] = formatter.Format(captures)
- }
-
- r := MetricMapperCacheResult{
- Mapping: result,
- Matched: true,
- Labels: labels,
- }
- // add match to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), r)
- }
-
- return result, labels, true
- } else if !m.doRegex {
- // if there's no regex match type, return immediately
- // Add miss to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})
- }
- return nil, nil, false
- }
- }
-
- // regex matching
- for _, mapping := range m.Mappings {
-	// if a rule doesn't have a regex match type, the regex field is unset
- if mapping.regex == nil {
- continue
- }
- matches := mapping.regex.FindStringSubmatchIndex(statsdMetric)
- if len(matches) == 0 {
- continue
- }
-
- mapping.Name = string(mapping.regex.ExpandString(
- []byte{},
- mapping.Name,
- statsdMetric,
- matches,
- ))
-
- if mt := mapping.MatchMetricType; mt != "" && mt != statsdMetricType {
- continue
- }
-
- labels := prometheus.Labels{}
- for label, valueExpr := range mapping.Labels {
- value := mapping.regex.ExpandString([]byte{}, valueExpr, statsdMetric, matches)
- labels[label] = string(value)
- }
-
- r := MetricMapperCacheResult{
- Mapping: &mapping,
- Matched: true,
- Labels: labels,
- }
- // Add Match to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), r)
- }
-
- return &mapping, labels, true
- }
-
- // Add Miss to cache
- if m.cache != nil {
- m.cache.Add(formatKey(statsdMetric, statsdMetricType), MetricMapperCacheResult{})
- }
- return nil, nil, false
-}
-
-// make a shallow copy so that we do not overwrite name,
-// as multiple names can be matched by the same mapping
-func copyMetricMapping(in *MetricMapping) *MetricMapping {
- out := *in
- return &out
-}
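For reference, a minimal sketch of how the mapper removed above is typically driven. The types, method signatures, and YAML keys come from the deleted file itself; the concrete metric and mapping names are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

func main() {
	// A glob mapping: "*" captures one dot-separated segment,
	// referenced as $1 in names and label values.
	config := `
mappings:
- match: "test.dispatcher.*"
  name: "dispatcher_events_total"
  labels:
    action: "$1"
`
	m := &mapper.MetricMapper{Logger: log.NewNopLogger()}
	if err := m.InitFromYAMLString(config); err != nil {
		panic(err)
	}

	mapping, labels, ok := m.GetMapping("test.dispatcher.succeeded", mapper.MetricTypeCounter)
	fmt.Println(mapping.Name, labels, ok)
	// dispatcher_events_total map[action:succeeded] true
}
```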
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go
deleted file mode 100644
index 9d65f8c2c..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_cache.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import (
- "github.com/prometheus/client_golang/prometheus"
-)
-
-type CacheMetrics struct {
- CacheLength prometheus.Gauge
- CacheGetsTotal prometheus.Counter
- CacheHitsTotal prometheus.Counter
-}
-
-func NewCacheMetrics(reg prometheus.Registerer) *CacheMetrics {
- var m CacheMetrics
-
- m.CacheLength = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Name: "statsd_metric_mapper_cache_length",
- Help: "The count of unique metrics currently cached.",
- },
- )
- m.CacheGetsTotal = prometheus.NewCounter(
- prometheus.CounterOpts{
- Name: "statsd_metric_mapper_cache_gets_total",
- Help: "The count of total metric cache gets.",
- },
- )
- m.CacheHitsTotal = prometheus.NewCounter(
- prometheus.CounterOpts{
- Name: "statsd_metric_mapper_cache_hits_total",
- Help: "The count of total metric cache hits.",
- },
- )
-
- if reg != nil {
- reg.MustRegister(m.CacheLength)
- reg.MustRegister(m.CacheGetsTotal)
- reg.MustRegister(m.CacheHitsTotal)
- }
- return &m
-}
-
-type MetricMapperCacheResult struct {
- Mapping *MetricMapping
- Matched bool
- Labels prometheus.Labels
-}
-
-// MetricMapperCache MUST be thread-safe and should be instrumented with CacheMetrics
-type MetricMapperCache interface {
- // Get a cached result
- Get(metricKey string) (interface{}, bool)
- // Add a statsd MetricMapperResult to the cache
- Add(metricKey string, result interface{}) // Add an item to the cache
- // Reset clears the cache for config reloads
- Reset()
-}
-
-func formatKey(metricString string, metricType MetricType) string {
- return string(metricType) + "." + metricString
-}
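To make the cache contract above concrete, a hedged sketch of a `MetricMapperCache` implementation: an unbounded map guarded by a mutex. The method set matches the deleted interface, but this naive cache is hypothetical — a real implementation would add an eviction policy and drive the `CacheMetrics` counters:

```go
package mapper

import "sync"

// naiveCache is a hypothetical MetricMapperCache: thread-safe, as the
// interface requires, but unbounded, so it grows with metric cardinality.
type naiveCache struct {
	mu    sync.RWMutex
	items map[string]interface{}
}

func newNaiveCache() *naiveCache {
	return &naiveCache{items: map[string]interface{}{}}
}

func (c *naiveCache) Get(metricKey string) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.items[metricKey]
	return v, ok
}

func (c *naiveCache) Add(metricKey string, result interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[metricKey] = result
}

// Reset clears the cache, e.g. on config reload.
func (c *naiveCache) Reset() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items = map[string]interface{}{}
}
```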
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go
deleted file mode 100644
index 754a677b7..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapper_defaults.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "time"
-
-type mapperConfigDefaults struct {
- ObserverType ObserverType `yaml:"observer_type"`
- MatchType MatchType `yaml:"match_type"`
- GlobDisableOrdering bool `yaml:"glob_disable_ordering"`
- Ttl time.Duration `yaml:"ttl"`
- SummaryOptions SummaryOptions `yaml:"summary_options"`
- HistogramOptions HistogramOptions `yaml:"histogram_options"`
-}
-
-// mapperConfigDefaultsAlias is used to unmarshal the yaml config into mapperConfigDefaults and allows deprecated fields
-type mapperConfigDefaultsAlias struct {
- ObserverType ObserverType `yaml:"observer_type"`
- TimerType ObserverType `yaml:"timer_type,omitempty"` // DEPRECATED - field only present to preserve backwards compatibility in configs
- Buckets []float64 `yaml:"buckets"` // DEPRECATED - field only present to preserve backwards compatibility in configs
- Quantiles []metricObjective `yaml:"quantiles"` // DEPRECATED - field only present to preserve backwards compatibility in configs
- MatchType MatchType `yaml:"match_type"`
- GlobDisableOrdering bool `yaml:"glob_disable_ordering"`
- Ttl time.Duration `yaml:"ttl"`
- SummaryOptions SummaryOptions `yaml:"summary_options"`
- HistogramOptions HistogramOptions `yaml:"histogram_options"`
-}
-
-// UnmarshalYAML is a custom unmarshal function to allow use of deprecated config keys
-// observer_type will override timer_type
-func (d *mapperConfigDefaults) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var tmp mapperConfigDefaultsAlias
- if err := unmarshal(&tmp); err != nil {
- return err
- }
-
- // Copy defaults
- d.ObserverType = tmp.ObserverType
- d.MatchType = tmp.MatchType
- d.GlobDisableOrdering = tmp.GlobDisableOrdering
- d.Ttl = tmp.Ttl
- d.SummaryOptions = tmp.SummaryOptions
- d.HistogramOptions = tmp.HistogramOptions
-
- // Use deprecated TimerType if necessary
- if tmp.ObserverType == "" {
- d.ObserverType = tmp.TimerType
- }
-
- // Use deprecated quantiles if necessary
- if len(tmp.SummaryOptions.Quantiles) == 0 && len(tmp.Quantiles) > 0 {
- d.SummaryOptions = SummaryOptions{Quantiles: tmp.Quantiles}
- }
-
- // Use deprecated buckets if necessary
- if len(tmp.HistogramOptions.Buckets) == 0 && len(tmp.Buckets) > 0 {
- d.HistogramOptions = HistogramOptions{Buckets: tmp.Buckets}
- }
-
- return nil
-}
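The practical effect of this custom unmarshalling is that old and new config spellings can coexist. A hedged example — the field names come from the deleted struct tags, the values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-kit/log"
	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

func main() {
	// "timer_type" is the deprecated spelling; since "observer_type"
	// is absent here, the deprecated value is carried over on unmarshal.
	config := `
defaults:
  timer_type: histogram
mappings:
- match: "a.*"
  name: "a_latency"
`
	m := &mapper.MetricMapper{Logger: log.NewNopLogger()}
	if err := m.InitFromYAMLString(config); err != nil {
		panic(err)
	}
	fmt.Println(m.Defaults.ObserverType) // histogram
}
```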
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go
deleted file mode 100644
index cdba27afb..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/mapping.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import (
- "regexp"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-
- "github.com/prometheus/statsd_exporter/pkg/mapper/fsm"
-)
-
-type MetricMapping struct {
- Match string `yaml:"match"`
- Name string `yaml:"name"`
- nameFormatter *fsm.TemplateFormatter
- regex *regexp.Regexp
- Labels prometheus.Labels `yaml:"labels"`
- labelKeys []string
- labelFormatters []*fsm.TemplateFormatter
- ObserverType ObserverType `yaml:"observer_type"`
- TimerType ObserverType `yaml:"timer_type,omitempty"` // DEPRECATED - field only present to preserve backwards compatibility in configs. Always empty
- LegacyBuckets []float64 `yaml:"buckets"`
- LegacyQuantiles []metricObjective `yaml:"quantiles"`
- MatchType MatchType `yaml:"match_type"`
- HelpText string `yaml:"help"`
- Action ActionType `yaml:"action"`
- MatchMetricType MetricType `yaml:"match_metric_type"`
- Ttl time.Duration `yaml:"ttl"`
- SummaryOptions *SummaryOptions `yaml:"summary_options"`
- HistogramOptions *HistogramOptions `yaml:"histogram_options"`
-}
-
-// UnmarshalYAML is a custom unmarshal function to allow use of deprecated config keys
-// observer_type will override timer_type
-func (m *MetricMapping) UnmarshalYAML(unmarshal func(interface{}) error) error {
- type MetricMappingAlias MetricMapping
- var tmp MetricMappingAlias
- if err := unmarshal(&tmp); err != nil {
- return err
- }
-
- // Copy defaults
- m.Match = tmp.Match
- m.Name = tmp.Name
- m.Labels = tmp.Labels
- m.ObserverType = tmp.ObserverType
- m.LegacyBuckets = tmp.LegacyBuckets
- m.LegacyQuantiles = tmp.LegacyQuantiles
- m.MatchType = tmp.MatchType
- m.HelpText = tmp.HelpText
- m.Action = tmp.Action
- m.MatchMetricType = tmp.MatchMetricType
- m.Ttl = tmp.Ttl
- m.SummaryOptions = tmp.SummaryOptions
- m.HistogramOptions = tmp.HistogramOptions
-
- // Use deprecated TimerType if necessary
- if tmp.ObserverType == "" {
- m.ObserverType = tmp.TimerType
- }
-
- return nil
-}
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/match.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/match.go
deleted file mode 100644
index 12d5e8d6b..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/match.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "fmt"
-
-type MatchType string
-
-const (
- MatchTypeGlob MatchType = "glob"
- MatchTypeRegex MatchType = "regex"
- MatchTypeDefault MatchType = ""
-)
-
-func (t *MatchType) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var v string
- if err := unmarshal(&v); err != nil {
- return err
- }
-
- switch MatchType(v) {
- case MatchTypeRegex:
- *t = MatchTypeRegex
- case MatchTypeGlob, MatchTypeDefault:
- *t = MatchTypeGlob
- default:
- return fmt.Errorf("invalid match type %q", v)
- }
- return nil
-}
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go
deleted file mode 100644
index 920c16ed3..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/metric_type.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "fmt"
-
-type MetricType string
-
-const (
- MetricTypeCounter MetricType = "counter"
- MetricTypeGauge MetricType = "gauge"
- MetricTypeObserver MetricType = "observer"
- MetricTypeTimer MetricType = "timer" // DEPRECATED
-)
-
-func (m *MetricType) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var v string
- if err := unmarshal(&v); err != nil {
- return err
- }
-
- switch MetricType(v) {
- case MetricTypeCounter:
- *m = MetricTypeCounter
- case MetricTypeGauge:
- *m = MetricTypeGauge
- case MetricTypeObserver:
- *m = MetricTypeObserver
- case MetricTypeTimer:
- *m = MetricTypeObserver
- default:
- return fmt.Errorf("invalid metric type '%s'", v)
- }
- return nil
-}
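Note how the deprecated "timer" value is silently folded into "observer" rather than rejected. A small hedged check of that behavior, using the gopkg.in/yaml.v2 package the mapper already depends on:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/statsd_exporter/pkg/mapper"
)

func main() {
	var mt mapper.MetricType
	if err := yaml.Unmarshal([]byte("timer"), &mt); err != nil {
		panic(err)
	}
	// true: the deprecated "timer" is accepted and mapped to "observer"
	fmt.Println(mt == mapper.MetricTypeObserver)
}
```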
diff --git a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go b/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go
deleted file mode 100644
index 3d5da7eab..000000000
--- a/vendor/github.com/prometheus/statsd_exporter/pkg/mapper/observer.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mapper
-
-import "fmt"
-
-type ObserverType string
-
-const (
- ObserverTypeHistogram ObserverType = "histogram"
- ObserverTypeSummary ObserverType = "summary"
- ObserverTypeDefault ObserverType = ""
-)
-
-func (t *ObserverType) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var v string
- if err := unmarshal(&v); err != nil {
- return err
- }
-
- switch ObserverType(v) {
- case ObserverTypeHistogram:
- *t = ObserverTypeHistogram
- case ObserverTypeSummary, ObserverTypeDefault:
- *t = ObserverTypeSummary
- default:
- return fmt.Errorf("invalid observer type '%s'", v)
- }
- return nil
-}
diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore
deleted file mode 100644
index 74a6db472..000000000
--- a/vendor/go.opencensus.io/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-/.idea/
-
-# go.opencensus.io/exporter/aws
-/exporter/aws/
-
-# Exclude vendor, use dep ensure after checkout:
-/vendor/github.com/
-/vendor/golang.org/
-/vendor/google.golang.org/
diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS
deleted file mode 100644
index e491a9e7f..000000000
--- a/vendor/go.opencensus.io/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-Google Inc.
diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md
deleted file mode 100644
index 1ba3962c8..000000000
--- a/vendor/go.opencensus.io/CONTRIBUTING.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# How to contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution,
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult [GitHub Help] for more
-information on using pull requests.
-
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/
-
-## Instructions
-
-Fork the repo, then check out the upstream repo into your GOPATH:
-
-```
-$ go get -d go.opencensus.io
-```
-
-Add your fork as an origin:
-
-```
-cd $(go env GOPATH)/src/go.opencensus.io
-git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git
-```
-
-Run tests:
-
-```
-$ make install-tools # Only first time.
-$ make
-```
-
-Checkout a new branch, make modifications and push the branch to your fork:
-
-```
-$ git checkout -b feature
-# edit files
-$ git commit
-$ git push fork feature
-```
-
-Open a pull request against the main opencensus-go repo.
-
-## General Notes
-This project uses Appveyor and Travis for CI.
-
-The dependencies are managed with `go mod`. If you work with the sources under
-your `$GOPATH`, you need to set the environment variable `GO111MODULE=on`.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE
deleted file mode 100644
index 7a4a3ea24..000000000
--- a/vendor/go.opencensus.io/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
\ No newline at end of file
diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile
deleted file mode 100644
index d896edc99..000000000
--- a/vendor/go.opencensus.io/Makefile
+++ /dev/null
@@ -1,97 +0,0 @@
-# TODO: Fix this on windows.
-ALL_SRC := $(shell find . -name '*.go' \
- -not -path './vendor/*' \
- -not -path '*/gen-go/*' \
- -type f | sort)
-ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC))))
-
-GOTEST_OPT?=-v -race -timeout 30s
-GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic
-GOTEST=go test
-GOIMPORTS=goimports
-GOLINT=golint
-GOVET=go vet
-EMBEDMD=embedmd
-# TODO decide if we need to change these names.
-TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages"
-TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages"
-README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ')
-
-.DEFAULT_GOAL := imports-lint-vet-embedmd-test
-
-.PHONY: imports-lint-vet-embedmd-test
-imports-lint-vet-embedmd-test: imports lint vet embedmd test
-
-# TODO enable test-with-coverage in travis
-.PHONY: travis-ci
-travis-ci: imports lint vet embedmd test test-386
-
-all-pkgs:
- @echo $(ALL_PKGS) | tr ' ' '\n' | sort
-
-all-srcs:
- @echo $(ALL_SRC) | tr ' ' '\n' | sort
-
-.PHONY: test
-test:
- $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS)
-
-.PHONY: test-386
-test-386:
- GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS)
-
-.PHONY: test-with-coverage
-test-with-coverage:
- $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
-
-.PHONY: imports
-imports:
- @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \
- if [ "$$IMPORTSOUT" ]; then \
- echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \
- echo "$$IMPORTSOUT\n"; \
- exit 1; \
- else \
- echo "Imports finished successfully"; \
- fi
-
-.PHONY: lint
-lint:
- @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \
- if [ "$$LINTOUT" ]; then \
- echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \
- echo "$$LINTOUT\n"; \
- exit 1; \
- else \
- echo "Lint finished successfully"; \
- fi
-
-.PHONY: vet
-vet:
- # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0"
- @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \
- if [ "$$VETOUT" ]; then \
- echo "$(GOVET) FAILED => go vet the following files:\n"; \
- echo "$$VETOUT\n"; \
- exit 1; \
- else \
- echo "Vet finished successfully"; \
- fi
-
-.PHONY: embedmd
-embedmd:
- @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \
- if [ "$$EMBEDMDOUT" ]; then \
- echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \
- echo "$$EMBEDMDOUT\n"; \
- exit 1; \
- else \
- echo "Embedmd finished successfully"; \
- fi
-
-.PHONY: install-tools
-install-tools:
- go install golang.org/x/lint/golint@latest
- go install golang.org/x/tools/cmd/cover@latest
- go install golang.org/x/tools/cmd/goimports@latest
- go install github.com/rakyll/embedmd@latest
diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md
deleted file mode 100644
index 1d7e83711..000000000
--- a/vendor/go.opencensus.io/README.md
+++ /dev/null
@@ -1,267 +0,0 @@
-# OpenCensus Libraries for Go
-
-[![Build Status][travis-image]][travis-url]
-[![Windows Build Status][appveyor-image]][appveyor-url]
-[![GoDoc][godoc-image]][godoc-url]
-[![Gitter chat][gitter-image]][gitter-url]
-
-OpenCensus Go is a Go implementation of OpenCensus, a toolkit for
-collecting application performance and behavior monitoring data.
-Currently it consists of three major components: tags, stats and tracing.
-
-#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289).
-
-## Installation
-
-```
-$ go get -u go.opencensus.io
-```
-
-The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy).
-The use of vendoring or a dependency management tool is recommended.
-
-## Prerequisites
-
-OpenCensus Go libraries require Go 1.8 or later.
-
-## Getting Started
-
-The easiest way to get started using OpenCensus in your application is to use an existing
-integration with your RPC framework:
-
-* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp)
-* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc)
-* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql)
-* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus)
-* [Groupcache](https://godoc.org/github.com/orijtech/groupcache)
-* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy)
-* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver)
-* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo)
-* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis)
-* [Memcache](https://godoc.org/github.com/orijtech/gomemcache)
-
-If you're using a framework not listed here, you could either implement your own middleware for your
-framework or use [custom stats](#stats) and [spans](#spans) directly in your application.
-
-## Exporters
-
-OpenCensus can export instrumentation data to various backends.
-It ships with exporter implementations for the backends listed below; users
-can also implement their own exporters by implementing the exporter interfaces
-([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter),
-[trace](https://godoc.org/go.opencensus.io/trace#Exporter)):
-
-* [Prometheus][exporter-prom] for stats
-* [OpenZipkin][exporter-zipkin] for traces
-* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces
-* [Jaeger][exporter-jaeger] for traces
-* [AWS X-Ray][exporter-xray] for traces
-* [Datadog][exporter-datadog] for stats and traces
-* [Graphite][exporter-graphite] for stats
-* [Honeycomb][exporter-honeycomb] for traces
-* [New Relic][exporter-newrelic] for stats and traces
-
-## Overview
-
-
-
-In a microservices environment, a user request may go through
-multiple services until there is a response. OpenCensus allows
-you to instrument your services and collect diagnostics data all
-through your services end-to-end.
-
-## Tags
-
-Tags represent propagated key-value pairs. They are propagated using `context.Context`
-in the same process or can be encoded to be transmitted on the wire. Usually, this will
-be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler`
-for gRPC.
-
-Package `tag` allows adding or modifying tags in the current context.
-
-[embedmd]:# (internal/readme/tags.go new)
-```go
-ctx, err := tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Upsert(userIDKey, "cde36753ed"),
-)
-if err != nil {
- log.Fatal(err)
-}
-```
-
-## Stats
-
-OpenCensus is a low-overhead framework, even when instrumentation is always enabled.
-To achieve this, it is optimized to make the recording of data points fast
-and separate from the data aggregation.
-
-OpenCensus stats collection happens in two stages:
-
-* Definition of measures and recording of data points
-* Definition of views and aggregation of the recorded data
-
-### Recording
-
-Measurements are data points associated with a measure.
-Recording implicitly tags the set of Measurements with the tags from the
-provided context:
-
-[embedmd]:# (internal/readme/stats.go record)
-```go
-stats.Record(ctx, videoSize.M(102478))
-```
-
-### Views
-
-Views are how Measures are aggregated. You can think of them as queries over the
-set of recorded data points (measurements).
-
-Views have two parts: the tags to group by and the aggregation type used.
-
-Currently three types of aggregations are supported:
-* CountAggregation is used to count the number of times a sample was recorded.
-* DistributionAggregation is used to provide a histogram of the values of the samples.
-* SumAggregation is used to sum up all sample values.
-
-[embedmd]:# (internal/readme/stats.go aggs)
-```go
-distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
-countAgg := view.Count()
-sumAgg := view.Sum()
-```
-
-Here we create a view with the DistributionAggregation over our measure.
-
-[embedmd]:# (internal/readme/stats.go view)
-```go
-if err := view.Register(&view.View{
- Name: "example.com/video_size_distribution",
- Description: "distribution of processed video size over time",
- Measure: videoSize,
- Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
-}); err != nil {
- log.Fatalf("Failed to register view: %v", err)
-}
-```
-
-Register begins collecting data for the view. Registered views' data will be
-exported via the registered exporters.
-
-## Traces
-
-A distributed trace tracks the progression of a single user request as
-it is handled by the services and processes that make up an application.
-Each step is called a span in the trace. Spans include metadata about the step,
-including especially the time spent in the step, called the span’s latency.
-
-Below you see a trace and several spans underneath it.
-
-
-
-### Spans
-
-Span is the unit step in a trace. Each span has a name, latency, status and
-additional metadata.
-
-Below we are starting a span for a cache read and ending it
-when we are done:
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-### Propagation
-
-Spans can have parents or can be root spans if they don't have any parents.
-The current span is propagated in-process and across the network to allow associating
-new child spans with the parent.
-
-In the same process, `context.Context` is used to propagate spans.
-`trace.StartSpan` creates a new span as a root if the current context
-doesn't contain a span; otherwise, it creates a child of the span
-already in the current context. The returned context can be used to
-keep propagating the newly created span.
-
-[embedmd]:# (internal/readme/trace.go startend)
-```go
-ctx, span := trace.StartSpan(ctx, "cache.Get")
-defer span.End()
-
-// Do work to get from cache.
-```
-
-Across the network, OpenCensus provides different propagation
-methods for different protocols.
-
-* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
-* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
- by default but can be configured to use a custom propagation method by setting another
- [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
-
-## Execution Tracer
-
-With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
-See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
-for an example of their mutual use.
-
-## Profiles
-
-OpenCensus tags can be applied as profiler labels
-for users who are on Go 1.9 and above.
-
-[embedmd]:# (internal/readme/tags.go profiler)
-```go
-ctx, err = tag.New(ctx,
- tag.Insert(osKey, "macOS-10.12.5"),
- tag.Insert(userIDKey, "fff0989878"),
-)
-if err != nil {
- log.Fatal(err)
-}
-tag.Do(ctx, func(ctx context.Context) {
- // Do work.
- // When profiling is on, samples will be
- // recorded with the key/values from the tag map.
-})
-```
-
-A screenshot of the CPU profile from the program above:
-
-
-
-## Deprecation Policy
-
-Before version 1.0.0, the following deprecation policy will be observed:
-
-No backwards-incompatible changes will be made except for the removal of symbols that have
-been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
-removing the *Deprecated* functionality will be made no sooner than 28 days after the first
-release in which the functionality was marked *Deprecated*.
-
-[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
-[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
-[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
-[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
-[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
-[godoc-url]: https://godoc.org/go.opencensus.io
-[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
-[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-
-
-[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
-[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
-
-[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
-[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
-[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin
-[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger
-[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
-[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
-[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
-[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
-[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go
diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml
deleted file mode 100644
index d08f0edaf..000000000
--- a/vendor/go.opencensus.io/appveyor.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: "{build}"
-
-platform: x64
-
-clone_folder: c:\gopath\src\go.opencensus.io
-
-environment:
- GOPATH: 'c:\gopath'
- GO111MODULE: 'on'
- CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
-
-stack: go 1.11
-
-before_test:
- - go version
- - go env
-
-build: false
-deploy: false
-
-test_script:
- - cd %APPVEYOR_BUILD_FOLDER%
- - go build -v .\...
- - go test -v .\... # No -race because cgo is disabled
diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go
deleted file mode 100644
index 81dc7183e..000000000
--- a/vendor/go.opencensus.io/internal/internal.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opencensus.io/internal"
-
-import (
- "fmt"
- "time"
-
- opencensus "go.opencensus.io"
-)
-
-// UserAgent is the user agent to be added to the outgoing
-// requests from the exporters.
-var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
-
-// MonotonicEndTime returns the current end time, but offset from start,
-// monotonically.
-//
-// The monotonic clock is used in subtractions, so the duration since start,
-// added back to start, gives end as a monotonic time.
-// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
-func MonotonicEndTime(start time.Time) time.Time {
- return start.Add(time.Since(start))
-}
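Since `go.opencensus.io/internal` is an internal package and cannot be imported by user code, the following stand-alone sketch shows the same idea directly: `time.Since` reads the monotonic clock, so `start.Add(time.Since(start))` yields an end time whose difference from `start` is unaffected by wall-clock adjustments.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for real work

	// Same expression as MonotonicEndTime above: time.Since uses the
	// monotonic clock, so end.Sub(start) is immune to wall-clock changes.
	end := start.Add(time.Since(start))
	fmt.Println("elapsed:", end.Sub(start))
}
```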
diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go
deleted file mode 100644
index de8ccf236..000000000
--- a/vendor/go.opencensus.io/internal/sanitize.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "strings"
- "unicode"
-)
-
-const labelKeySizeLimit = 100
-
-// Sanitize returns a string that is truncated to 100 characters if it is too
-// long, and replaces non-alphanumeric characters with underscores.
-func Sanitize(s string) string {
- if len(s) == 0 {
- return s
- }
- if len(s) > labelKeySizeLimit {
- s = s[:labelKeySizeLimit]
- }
- s = strings.Map(sanitizeRune, s)
- if unicode.IsDigit(rune(s[0])) {
- s = "key_" + s
- }
- if s[0] == '_' {
- s = "key" + s
- }
- return s
-}
-
-// converts anything that is not a letter or digit to an underscore
-func sanitizeRune(r rune) rune {
- if unicode.IsLetter(r) || unicode.IsDigit(r) {
- return r
- }
- // Everything else turns into an underscore
- return '_'
-}
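For illustration, here is a stand-alone sketch of the sanitization rules above; the local `sanitize` copy exists only because the `internal` package is not importable from user code.

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// sanitize mirrors the vendored Sanitize above, for demonstration only.
func sanitize(s string) string {
	if len(s) == 0 {
		return s
	}
	if len(s) > 100 {
		s = s[:100] // truncate to the label key size limit
	}
	s = strings.Map(func(r rune) rune {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			return r
		}
		return '_' // everything else becomes an underscore
	}, s)
	if unicode.IsDigit(rune(s[0])) {
		s = "key_" + s
	}
	if s[0] == '_' {
		s = "key" + s
	}
	return s
}

func main() {
	fmt.Println(sanitize("http.status/code")) // http_status_code
	fmt.Println(sanitize("2xx"))              // key_2xx
	fmt.Println(sanitize("_private"))         // key_private
}
```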
diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
deleted file mode 100644
index 41b2c3fc0..000000000
--- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Package tagencoding contains the tag encoding
-// used internally by the stats collector.
-package tagencoding // import "go.opencensus.io/internal/tagencoding"
-
-// Values represents the encoded buffer for the values.
-type Values struct {
- Buffer []byte
- WriteIndex int
- ReadIndex int
-}
-
-func (vb *Values) growIfRequired(expected int) {
- if len(vb.Buffer)-vb.WriteIndex < expected {
- tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
- copy(tmp, vb.Buffer)
- vb.Buffer = tmp
- }
-}
-
-// WriteValue is the helper method to encode Values from map[Key][]byte.
-func (vb *Values) WriteValue(v []byte) {
- length := len(v) & 0xff // the length is stored in a single byte, so only the low 8 bits of len(v) are kept
- vb.growIfRequired(1 + length)
-
- // writing length of v
- vb.Buffer[vb.WriteIndex] = byte(length)
- vb.WriteIndex++
-
- if length == 0 {
- // No value was encoded for this key
- return
- }
-
- // writing v
- copy(vb.Buffer[vb.WriteIndex:], v[:length])
- vb.WriteIndex += length
-}
-
-// ReadValue is the helper method to decode Values to a map[Key][]byte.
-func (vb *Values) ReadValue() []byte {
- // read length of v
- length := int(vb.Buffer[vb.ReadIndex])
- vb.ReadIndex++
- if length == 0 {
- // No value was encoded for this key
- return nil
- }
-
- // read value of v
- v := make([]byte, length)
- endIdx := vb.ReadIndex + length
- copy(v, vb.Buffer[vb.ReadIndex:endIdx])
- vb.ReadIndex = endIdx
- return v
-}
-
-// Bytes returns a reference to already written bytes in the Buffer.
-func (vb *Values) Bytes() []byte {
- return vb.Buffer[:vb.WriteIndex]
-}
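A stand-alone sketch of the length-prefixed framing that `WriteValue` and `ReadValue` implement: one length byte per value, with a zero byte meaning "no value for this key". The code below only illustrates the byte layout; it is not the vendored API.

```go
package main

import "fmt"

func main() {
	// Encode: one length byte, then the value bytes (nil encodes as 0x00).
	var buf []byte
	for _, v := range [][]byte{[]byte("web"), nil, []byte("GET")} {
		buf = append(buf, byte(len(v)))
		buf = append(buf, v...)
	}
	fmt.Printf("% x\n", buf) // 03 77 65 62 00 03 47 45 54

	// Decode: read the length byte, then slice that many value bytes.
	for i := 0; i < len(buf); {
		n := int(buf[i])
		i++
		fmt.Printf("%q\n", buf[i:i+n])
		i += n
	}
}
```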
diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go
deleted file mode 100644
index 073af7b47..000000000
--- a/vendor/go.opencensus.io/internal/traceinternals.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "time"
-)
-
-// Trace allows internal access to some trace functionality.
-// TODO(#412): remove this
-var Trace interface{}
-
-// LocalSpanStoreEnabled true if the local span store is enabled.
-var LocalSpanStoreEnabled bool
-
-// BucketConfiguration stores the number of samples to store for span buckets
-// for successful and failed spans for a particular span name.
-type BucketConfiguration struct {
- Name string
- MaxRequestsSucceeded int
- MaxRequestsErrors int
-}
-
-// PerMethodSummary is a summary of the spans stored for a single span name.
-type PerMethodSummary struct {
- Active int
- LatencyBuckets []LatencyBucketSummary
- ErrorBuckets []ErrorBucketSummary
-}
-
-// LatencyBucketSummary is a summary of a latency bucket.
-type LatencyBucketSummary struct {
- MinLatency, MaxLatency time.Duration
- Size int
-}
-
-// ErrorBucketSummary is a summary of an error bucket.
-type ErrorBucketSummary struct {
- ErrorCode int32
- Size int
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go
deleted file mode 100644
index 52a7b3bf8..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metricdata contains the metrics data model.
-//
-// This is an EXPERIMENTAL package, and may change in arbitrary ways without
-// notice.
-package metricdata // import "go.opencensus.io/metric/metricdata"
diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go
deleted file mode 100644
index 12695ce2d..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/exemplar.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-import (
- "time"
-)
-
-// Exemplars keys.
-const (
- AttachmentKeySpanContext = "SpanContext"
-)
-
-// Exemplar is an example data point associated with each bucket of a
-// distribution type aggregation.
-//
-// Their purpose is to provide an example of the kind of thing
-// (request, RPC, trace span, etc.) that resulted in that measurement.
-type Exemplar struct {
- Value float64 // the value that was recorded
- Timestamp time.Time // the time the value was recorded
- Attachments Attachments // attachments (if any)
-}
-
-// Attachments is a map of extra values associated with a recorded data point.
-type Attachments map[string]interface{}
diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go
deleted file mode 100644
index aadae41e6..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/label.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-// LabelKey represents the key of a label. It has an optional
-// description attribute.
-type LabelKey struct {
- Key string
- Description string
-}
-
-// LabelValue represents the value of a label.
-// The zero value represents a missing label value, which may be treated
-// differently to an empty string value by some back ends.
-type LabelValue struct {
- Value string // string value of the label
- Present bool // flag that indicates whether a value is present or not
-}
-
-// NewLabelValue creates a new non-nil LabelValue that represents the given string.
-func NewLabelValue(val string) LabelValue {
- return LabelValue{Value: val, Present: true}
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go
deleted file mode 100644
index 8293712c7..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/metric.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-import (
- "time"
-
- "go.opencensus.io/resource"
-)
-
-// Descriptor holds metadata about a metric.
-type Descriptor struct {
- Name string // full name of the metric
- Description string // human-readable description
- Unit Unit // units for the measure
- Type Type // type of measure
- LabelKeys []LabelKey // label keys
-}
-
-// Metric represents a quantity measured against a resource with different
-// label value combinations.
-type Metric struct {
- Descriptor Descriptor // metric descriptor
- Resource *resource.Resource // resource against which this was measured
- TimeSeries []*TimeSeries // one time series for each combination of label values
-}
-
-// TimeSeries is a sequence of points associated with a combination of label
-// values.
-type TimeSeries struct {
- LabelValues []LabelValue // label values, same order as keys in the metric descriptor
- Points []Point // points sequence
- StartTime time.Time // time we started recording this time series
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go
deleted file mode 100644
index 7fe057b19..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/point.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-import (
- "time"
-)
-
-// Point is a single data point of a time series.
-type Point struct {
- // Time is the point in time that this point represents in a time series.
- Time time.Time
- // Value is the value of this point. Prefer using ReadValue to switching on
- // the value type, since new value types might be added.
- Value interface{}
-}
-
-//go:generate stringer -type ValueType
-
-// NewFloat64Point creates a new Point holding a float64 value.
-func NewFloat64Point(t time.Time, val float64) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// NewInt64Point creates a new Point holding an int64 value.
-func NewInt64Point(t time.Time, val int64) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// NewDistributionPoint creates a new Point holding a Distribution value.
-func NewDistributionPoint(t time.Time, val *Distribution) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// NewSummaryPoint creates a new Point holding a Summary value.
-func NewSummaryPoint(t time.Time, val *Summary) Point {
- return Point{
- Value: val,
- Time: t,
- }
-}
-
-// ValueVisitor allows reading the value of a point.
-type ValueVisitor interface {
- VisitFloat64Value(float64)
- VisitInt64Value(int64)
- VisitDistributionValue(*Distribution)
- VisitSummaryValue(*Summary)
-}
-
-// ReadValue accepts a ValueVisitor and calls the appropriate method with the
-// value of this point.
-// Consumers of Point should use this in preference to switching on the type
-// of the value directly, since new value types may be added.
-func (p Point) ReadValue(vv ValueVisitor) {
- switch v := p.Value.(type) {
- case int64:
- vv.VisitInt64Value(v)
- case float64:
- vv.VisitFloat64Value(v)
- case *Distribution:
- vv.VisitDistributionValue(v)
- case *Summary:
- vv.VisitSummaryValue(v)
- default:
- panic("unexpected value type")
- }
-}
-
-// Distribution contains summary statistics for a population of values. It
-// optionally contains a histogram representing the distribution of those
-// values across a set of buckets.
-type Distribution struct {
- // Count is the number of values in the population. Must be non-negative. This value
- // must equal the sum of the values in bucket_counts if a histogram is
- // provided.
- Count int64
- // Sum is the sum of the values in the population. If count is zero then this field
- // must be zero.
- Sum float64
- // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the
- // population. For values x_i this is:
- //
- // Sum[i=1..n]((x_i - mean)^2)
- //
- // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
- // describes Welford's method for accumulating this sum in one pass.
- //
- // If count is zero then this field must be zero.
- SumOfSquaredDeviation float64
- // BucketOptions describes the bounds of the histogram buckets in this
- // distribution.
- //
- // A Distribution may optionally contain a histogram of the values in the
- // population.
- //
- // If nil, there is no associated histogram.
- BucketOptions *BucketOptions
- // Buckets contains the histogram bucket counts. If the distribution does
- // not have a histogram, omit this field. If there is a histogram, then the
- // sum of the values in the bucket counts must equal the value in the count
- // field of the distribution.
- Buckets []Bucket
-}
-
-// BucketOptions describes the bounds of the histogram buckets in this
-// distribution.
-type BucketOptions struct {
- // Bounds specifies a set of bucket upper bounds.
- // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket
- // index i are:
- //
- // [0, Bounds[i]) for i == 0
- // [Bounds[i-1], Bounds[i]) for 0 < i < N-1
- // [Bounds[i-1], +infinity) for i == N-1
- Bounds []float64
-}
-
-// Bucket represents a single bucket (value range) in a distribution.
-type Bucket struct {
- // Count is the number of values in each bucket of the histogram, as described in
- // bucket_bounds.
- Count int64
- // Exemplar associated with this bucket (if any).
- Exemplar *Exemplar
-}
-
-// Summary is a representation of percentiles.
-type Summary struct {
- // Count is the cumulative count (if available).
- Count int64
- // Sum is the cumulative sum of values (if available).
- Sum float64
- // HasCountAndSum is true if Count and Sum are available.
- HasCountAndSum bool
- // Snapshot represents percentiles calculated over an arbitrary time window.
- // The values in this struct can be reset at arbitrary unknown times, with
- // the requirement that all of them are reset at the same time.
- Snapshot Snapshot
-}
-
-// Snapshot represents percentiles over an arbitrary time.
-// The values in this struct can be reset at arbitrary unknown times, with
-// the requirement that all of them are reset at the same time.
-type Snapshot struct {
- // Count is the number of values in the snapshot. Optional since some systems don't
- // expose this. Set to 0 if not available.
- Count int64
- // Sum is the sum of values in the snapshot. Optional since some systems don't
- // expose this. If count is 0 then this field must be zero.
- Sum float64
- // Percentiles is a map from percentile (range (0-100.0]) to the value of
- // the percentile.
- Percentiles map[float64]float64
-}
-
-//go:generate stringer -type Type
-
-// Type is the overall type of metric, including its value type and whether it
-// represents a cumulative total (since the start time) or if it represents a
-// gauge value.
-type Type int
-
-// Metric types.
-const (
- TypeGaugeInt64 Type = iota
- TypeGaugeFloat64
- TypeGaugeDistribution
- TypeCumulativeInt64
- TypeCumulativeFloat64
- TypeCumulativeDistribution
- TypeSummary
-)
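A short sketch of consuming points through a `ValueVisitor`, as the `ReadValue` comment recommends; `printVisitor` is a hypothetical implementation.

```go
package main

import (
	"fmt"
	"time"

	"go.opencensus.io/metric/metricdata"
)

// printVisitor implements metricdata.ValueVisitor and prints each value.
type printVisitor struct{}

func (printVisitor) VisitFloat64Value(v float64) { fmt.Println("float64:", v) }
func (printVisitor) VisitInt64Value(v int64)     { fmt.Println("int64:", v) }
func (printVisitor) VisitDistributionValue(d *metricdata.Distribution) {
	fmt.Println("distribution count:", d.Count)
}
func (printVisitor) VisitSummaryValue(s *metricdata.Summary) {
	fmt.Println("summary count:", s.Count)
}

func main() {
	points := []metricdata.Point{
		metricdata.NewInt64Point(time.Now(), 42),
		metricdata.NewFloat64Point(time.Now(), 0.5),
	}
	for _, p := range points {
		p.ReadValue(printVisitor{}) // dispatches to the matching Visit* method
	}
}
```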
diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go
deleted file mode 100644
index c3f8ec27b..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/type_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by "stringer -type Type"; DO NOT EDIT.
-
-package metricdata
-
-import "strconv"
-
-const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary"
-
-var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128}
-
-func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
- return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
-}
diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go
deleted file mode 100644
index b483a1371..000000000
--- a/vendor/go.opencensus.io/metric/metricdata/unit.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricdata
-
-// Unit is a string encoded according to the case-sensitive abbreviations from the
-// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
-type Unit string
-
-// Predefined units. To record against a unit not represented here, create your
-// own Unit type constant from a string.
-const (
- UnitDimensionless Unit = "1"
- UnitBytes Unit = "By"
- UnitMilliseconds Unit = "ms"
-)
diff --git a/vendor/go.opencensus.io/metric/metricexport/doc.go b/vendor/go.opencensus.io/metric/metricexport/doc.go
deleted file mode 100644
index df632a792..000000000
--- a/vendor/go.opencensus.io/metric/metricexport/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metricexport contains support for exporting metric data.
-//
-// This is an EXPERIMENTAL package, and may change in arbitrary ways without
-// notice.
-package metricexport // import "go.opencensus.io/metric/metricexport"
diff --git a/vendor/go.opencensus.io/metric/metricexport/export.go b/vendor/go.opencensus.io/metric/metricexport/export.go
deleted file mode 100644
index 23f4a864a..000000000
--- a/vendor/go.opencensus.io/metric/metricexport/export.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricexport
-
-import (
- "context"
-
- "go.opencensus.io/metric/metricdata"
-)
-
-// Exporter is an interface that exporters implement to export the metric data.
-type Exporter interface {
- ExportMetrics(ctx context.Context, data []*metricdata.Metric) error
-}
diff --git a/vendor/go.opencensus.io/metric/metricexport/reader.go b/vendor/go.opencensus.io/metric/metricexport/reader.go
deleted file mode 100644
index 8a09d0f00..000000000
--- a/vendor/go.opencensus.io/metric/metricexport/reader.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package metricexport
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/metric/metricproducer"
- "go.opencensus.io/trace"
-)
-
-var (
- defaultSampler = trace.ProbabilitySampler(0.0001)
- errReportingIntervalTooLow = fmt.Errorf("reporting interval less than %d", minimumReportingDuration)
- errAlreadyStarted = fmt.Errorf("already started")
- errIntervalReaderNil = fmt.Errorf("interval reader is nil")
- errExporterNil = fmt.Errorf("exporter is nil")
- errReaderNil = fmt.Errorf("reader is nil")
-)
-
-const (
- defaultReportingDuration = 60 * time.Second
- minimumReportingDuration = 1 * time.Second
- defaultSpanName = "ExportMetrics"
-)
-
-// ReaderOptions contains options pertaining to the metrics reader.
-type ReaderOptions struct {
- // SpanName is the name used for the span created to export metrics.
- SpanName string
-}
-
-// Reader reads metrics from all producers registered
-// with the producer manager and exports those metrics using the
-// provided exporter.
-type Reader struct {
- sampler trace.Sampler
-
- spanName string
-}
-
-// IntervalReader periodically reads metrics from all producers registered
-// with the producer manager and exports those metrics using the provided
-// exporter. Call IntervalReader.Stop() to stop the reader.
-type IntervalReader struct {
- // ReportingInterval is the time duration between two consecutive
- // metrics reports. defaultReportingDuration is used if it is not set.
- // It cannot be set lower than minimumReportingDuration.
- ReportingInterval time.Duration
-
- exporter Exporter
- timer *time.Ticker
- quit, done chan bool
- mu sync.RWMutex
- reader *Reader
-}
-
-// ReaderOption applies changes to ReaderOptions.
-type ReaderOption func(*ReaderOptions)
-
-// WithSpanName makes the new reader use the given span name when exporting metrics.
-func WithSpanName(spanName string) ReaderOption {
- return func(o *ReaderOptions) {
- o.SpanName = spanName
- }
-}
-
-// NewReader returns a reader configured with the specified options.
-func NewReader(o ...ReaderOption) *Reader {
- var opts ReaderOptions
- for _, op := range o {
- op(&opts)
- }
- reader := &Reader{defaultSampler, defaultSpanName}
- if opts.SpanName != "" {
- reader.spanName = opts.SpanName
- }
- return reader
-}
-
-// NewIntervalReader creates a reader. Once started, it periodically
-// reads metrics from all producers and exports them using the provided exporter.
-func NewIntervalReader(reader *Reader, exporter Exporter) (*IntervalReader, error) {
- if exporter == nil {
- return nil, errExporterNil
- }
- if reader == nil {
- return nil, errReaderNil
- }
-
- r := &IntervalReader{
- exporter: exporter,
- reader: reader,
- }
- return r, nil
-}
-
-// Start starts the IntervalReader, which periodically reads metrics from all
-// producers registered with the global producer manager. If the reporting
-// interval is not set prior to calling this function, the default reporting
-// interval is used.
-func (ir *IntervalReader) Start() error {
- if ir == nil {
- return errIntervalReaderNil
- }
- ir.mu.Lock()
- defer ir.mu.Unlock()
- var reportingInterval = defaultReportingDuration
- if ir.ReportingInterval != 0 {
- if ir.ReportingInterval < minimumReportingDuration {
- return errReportingIntervalTooLow
- }
- reportingInterval = ir.ReportingInterval
- }
-
- if ir.quit != nil {
- return errAlreadyStarted
- }
- ir.timer = time.NewTicker(reportingInterval)
- ir.quit = make(chan bool)
- ir.done = make(chan bool)
-
- go ir.startInternal()
- return nil
-}
-
-func (ir *IntervalReader) startInternal() {
- for {
- select {
- case <-ir.timer.C:
- ir.reader.ReadAndExport(ir.exporter)
- case <-ir.quit:
- ir.timer.Stop()
- ir.done <- true
- return
- }
- }
-}
-
-// Stop stops the reader from reading and exporting metrics.
-// Additional calls to Stop are no-ops.
-func (ir *IntervalReader) Stop() {
- if ir == nil {
- return
- }
- ir.mu.Lock()
- defer ir.mu.Unlock()
- if ir.quit == nil {
- return
- }
- ir.quit <- true
- <-ir.done
- close(ir.quit)
- close(ir.done)
- ir.quit = nil
-}
-
-// Flush flushes the metrics if the IntervalReader is stopped; otherwise it is a no-op.
-func (ir *IntervalReader) Flush() {
- ir.mu.Lock()
- defer ir.mu.Unlock()
-
- // No-op if IntervalReader is not stopped
- if ir.quit != nil {
- return
- }
-
- ir.reader.ReadAndExport(ir.exporter)
-}
-
-// ReadAndExport reads metrics from all producers registered with the
-// producer manager and then exports them using the provided exporter.
-func (r *Reader) ReadAndExport(exporter Exporter) {
- ctx, span := trace.StartSpan(context.Background(), r.spanName, trace.WithSampler(r.sampler))
- defer span.End()
- producers := metricproducer.GlobalManager().GetAll()
- data := []*metricdata.Metric{}
- for _, producer := range producers {
- data = append(data, producer.Read()...)
- }
- // TODO: [rghetia] add metrics for errors.
- exporter.ExportMetrics(ctx, data)
-}
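A minimal usage sketch for the interval reader, assuming a toy `logExporter` (hypothetical) and the global producer manager:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricexport"
)

// logExporter is a toy Exporter that only counts the metrics it receives.
type logExporter struct{}

func (logExporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error {
	log.Printf("exporting %d metrics", len(data))
	return nil
}

func main() {
	reader := metricexport.NewReader(metricexport.WithSpanName("ExportDemo"))
	ir, err := metricexport.NewIntervalReader(reader, logExporter{})
	if err != nil {
		log.Fatal(err)
	}
	ir.ReportingInterval = 5 * time.Second // must be at least 1s
	if err := ir.Start(); err != nil {
		log.Fatal(err)
	}
	defer ir.Stop()

	time.Sleep(12 * time.Second) // let a couple of export cycles run
}
```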
diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go
deleted file mode 100644
index ca1f39049..000000000
--- a/vendor/go.opencensus.io/metric/metricproducer/manager.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricproducer
-
-import (
- "sync"
-)
-
-// Manager maintains a list of active producers. Producers can register
-// with the manager to allow readers to read all metrics provided by them.
-// Readers can retrieve all producers registered with the manager,
-// read metrics from the producers and export them.
-type Manager struct {
- mu sync.RWMutex
- producers map[Producer]struct{}
-}
-
-var prodMgr *Manager
-var once sync.Once
-
-// GlobalManager returns the single instance of the producer manager
-// that is used by all producers and all readers.
-func GlobalManager() *Manager {
- once.Do(func() {
- prodMgr = &Manager{}
- prodMgr.producers = make(map[Producer]struct{})
- })
- return prodMgr
-}
-
-// AddProducer adds the producer to the Manager if it is not already present.
-func (pm *Manager) AddProducer(producer Producer) {
- if producer == nil {
- return
- }
- pm.mu.Lock()
- defer pm.mu.Unlock()
- pm.producers[producer] = struct{}{}
-}
-
-// DeleteProducer deletes the producer from the Manager if it is present.
-func (pm *Manager) DeleteProducer(producer Producer) {
- if producer == nil {
- return
- }
- pm.mu.Lock()
- defer pm.mu.Unlock()
- delete(pm.producers, producer)
-}
-
-// GetAll returns a slice of all producers currently registered with
-// the Manager. Each call generates a new slice. The slice
-// should not be cached, as registration may change at any time. It is
-// typically called periodically by an exporter to read metrics from
-// the producers.
-func (pm *Manager) GetAll() []Producer {
- pm.mu.Lock()
- defer pm.mu.Unlock()
- producers := make([]Producer, len(pm.producers))
- i := 0
- for producer := range pm.producers {
- producers[i] = producer
- i++
- }
- return producers
-}
diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go
deleted file mode 100644
index 6cee9ed17..000000000
--- a/vendor/go.opencensus.io/metric/metricproducer/producer.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metricproducer
-
-import (
- "go.opencensus.io/metric/metricdata"
-)
-
-// Producer is a source of metrics.
-type Producer interface {
- // Read should return the current values of all metrics supported by this
- // metric provider.
- // The returned metrics should be unique for each combination of name and
- // resource.
- Read() []*metricdata.Metric
-}
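A sketch of a custom `Producer` registered with the global manager; `constProducer` and the metric name are hypothetical:

```go
package main

import (
	"go.opencensus.io/metric/metricdata"
	"go.opencensus.io/metric/metricproducer"
)

// constProducer is a toy Producer that always reports the same metric.
type constProducer struct {
	metric *metricdata.Metric
}

func (p *constProducer) Read() []*metricdata.Metric {
	return []*metricdata.Metric{p.metric}
}

func main() {
	p := &constProducer{metric: &metricdata.Metric{
		Descriptor: metricdata.Descriptor{
			Name: "demo/queue_length", // hypothetical metric name
			Type: metricdata.TypeGaugeInt64,
			Unit: metricdata.UnitDimensionless,
		},
	}}
	metricproducer.GlobalManager().AddProducer(p)
	defer metricproducer.GlobalManager().DeleteProducer(p)

	// Readers (e.g. metricexport.Reader) now pick p up via GetAll.
	_ = metricproducer.GlobalManager().GetAll()
}
```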
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
deleted file mode 100644
index 11e31f421..000000000
--- a/vendor/go.opencensus.io/opencensus.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package opencensus contains Go support for OpenCensus.
-package opencensus // import "go.opencensus.io"
-
-// Version is the current release version of OpenCensus in use.
-func Version() string {
- return "0.24.0"
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go
deleted file mode 100644
index da815b2a7..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/client.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "net/http"
- "net/http/httptrace"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// Transport is an http.RoundTripper that instruments all outgoing requests with
-// OpenCensus stats and tracing.
-//
-// The zero value is intended to be a useful default, but for
-// now it's recommended that you explicitly set Propagation, since the default
-// for this may change.
-type Transport struct {
- // Base may be set to wrap another http.RoundTripper that does the actual
- // requests. By default http.DefaultTransport is used.
- //
- // If base HTTP roundtripper implements CancelRequest,
- // the returned round tripper will be cancelable.
- Base http.RoundTripper
-
- // Propagation defines how traces are propagated. If unspecified, a default
- // (currently B3 format) will be used.
- Propagation propagation.HTTPFormat
-
- // StartOptions are applied to the span started by this Transport around each
- // request.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindClient
- // for spans started by this transport.
- StartOptions trace.StartOptions
-
- // GetStartOptions allows setting start options per request. If set,
- // StartOptions is ignored.
- GetStartOptions func(*http.Request) trace.StartOptions
-
- // FormatSpanName holds the function to use for generating the span name
- // from the information found in the outgoing HTTP Request. By default the
- // name equals the URL Path.
- FormatSpanName func(*http.Request) string
-
- // NewClientTrace may be set to a function allowing the current *trace.Span
- // to be annotated with HTTP request event information emitted by the
- // httptrace package.
- NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
-
- // TODO: Implement tag propagation for HTTP.
-}
-
-// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- rt := t.base()
- if isHealthEndpoint(req.URL.Path) {
- return rt.RoundTrip(req)
- }
- // TODO: remove excessive nesting of http.RoundTrippers here.
- format := t.Propagation
- if format == nil {
- format = defaultFormat
- }
- spanNameFormatter := t.FormatSpanName
- if spanNameFormatter == nil {
- spanNameFormatter = spanNameFromURL
- }
-
- startOpts := t.StartOptions
- if t.GetStartOptions != nil {
- startOpts = t.GetStartOptions(req)
- }
-
- rt = &traceTransport{
- base: rt,
- format: format,
- startOptions: trace.StartOptions{
- Sampler: startOpts.Sampler,
- SpanKind: trace.SpanKindClient,
- },
- formatSpanName: spanNameFormatter,
- newClientTrace: t.NewClientTrace,
- }
- rt = statsTransport{base: rt}
- return rt.RoundTrip(req)
-}
-
-func (t *Transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *Transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- cr.CancelRequest(req)
- }
-}
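A usage sketch for the client side: wrap an `http.Client` with `ochttp.Transport`, setting `Propagation` explicitly as the doc comment above recommends. The target URL is a placeholder.

```go
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// Set Propagation explicitly; the default may change.
			Propagation: &b3.HTTPFormat{},
		},
	}
	resp, err := client.Get("https://example.com/") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```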
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
deleted file mode 100644
index 17142aabe..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "io"
- "net/http"
- "strconv"
- "sync"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
-)
-
-// statsTransport is an http.RoundTripper that collects stats for the outgoing requests.
-type statsTransport struct {
- base http.RoundTripper
-}
-
-// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
-func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- ctx, _ := tag.New(req.Context(),
- tag.Upsert(KeyClientHost, req.Host),
- tag.Upsert(Host, req.Host),
- tag.Upsert(KeyClientPath, req.URL.Path),
- tag.Upsert(Path, req.URL.Path),
- tag.Upsert(KeyClientMethod, req.Method),
- tag.Upsert(Method, req.Method))
- req = req.WithContext(ctx)
- track := &tracker{
- start: time.Now(),
- ctx: ctx,
- }
- if req.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- track.reqSize = -1
- } else if req.ContentLength > 0 {
- track.reqSize = req.ContentLength
- }
- stats.Record(ctx, ClientRequestCount.M(1))
-
- // Perform request.
- resp, err := t.base.RoundTrip(req)
-
- if err != nil {
- track.statusCode = http.StatusInternalServerError
- track.end()
- } else {
- track.statusCode = resp.StatusCode
- if req.Method != "HEAD" {
- track.respContentLength = resp.ContentLength
- }
- if resp.Body == nil {
- track.end()
- } else {
- track.body = resp.Body
- resp.Body = wrappedBody(track, resp.Body)
- }
- }
- return resp, err
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t statsTransport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base.(canceler); ok {
- cr.CancelRequest(req)
- }
-}
-
-type tracker struct {
- ctx context.Context
- respSize int64
- respContentLength int64
- reqSize int64
- start time.Time
- body io.ReadCloser
- statusCode int
- endOnce sync.Once
-}
-
-var _ io.ReadCloser = (*tracker)(nil)
-
-func (t *tracker) end() {
- t.endOnce.Do(func() {
- latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond)
- respSize := t.respSize
- if t.respSize == 0 && t.respContentLength > 0 {
- respSize = t.respContentLength
- }
- m := []stats.Measurement{
- ClientSentBytes.M(t.reqSize),
- ClientReceivedBytes.M(respSize),
- ClientRoundtripLatency.M(latencyMs),
- ClientLatency.M(latencyMs),
- ClientResponseBytes.M(t.respSize),
- }
- if t.reqSize >= 0 {
- m = append(m, ClientRequestBytes.M(t.reqSize))
- }
-
- stats.RecordWithTags(t.ctx, []tag.Mutator{
- tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)),
- tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)),
- }, m...)
- })
-}
-
-func (t *tracker) Read(b []byte) (int, error) {
- n, err := t.body.Read(b)
- t.respSize += int64(n)
- switch err {
- case nil:
- return n, nil
- case io.EOF:
- t.end()
- }
- return n, err
-}
-
-func (t *tracker) Close() error {
- // Invoking end on Close helps catch the cases in which a read
- // returned a non-nil error: we set the span status but didn't
- // end the span.
- t.end()
- return t.body.Close()
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go
deleted file mode 100644
index 10e626b16..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ochttp provides OpenCensus instrumentation for the net/http package.
-//
-// For server instrumentation, see Handler. For client-side instrumentation,
-// see Transport.
-package ochttp // import "go.opencensus.io/plugin/ochttp"
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
deleted file mode 100644
index 9ad885219..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package b3 contains a propagation.HTTPFormat implementation
-// for B3 propagation. See https://github.com/openzipkin/b3-propagation
-// for more details.
-package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
-
-import (
- "encoding/hex"
- "net/http"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// B3 headers that OpenCensus understands.
-const (
- TraceIDHeader = "X-B3-TraceId"
- SpanIDHeader = "X-B3-SpanId"
- SampledHeader = "X-B3-Sampled"
-)
-
-// HTTPFormat implements propagation.HTTPFormat to propagate
-// traces in HTTP headers in B3 propagation format.
-// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
-// because there are additional fields not represented in the
-// OpenCensus span context. Spans created from the incoming
-// header will be the direct children of the client-side span.
-// Similarly, the receiver of the outgoing spans should use the client-side
-// span created by OpenCensus as the parent.
-type HTTPFormat struct{}
-
-var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
-
-// SpanContextFromRequest extracts a B3 span context from incoming requests.
-func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
- if !ok {
- return trace.SpanContext{}, false
- }
- sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
- if !ok {
- return trace.SpanContext{}, false
- }
- sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
- return trace.SpanContext{
- TraceID: tid,
- SpanID: sid,
- TraceOptions: sampled,
- }, true
-}
-
-// ParseTraceID parses the value of the X-B3-TraceId header.
-func ParseTraceID(tid string) (trace.TraceID, bool) {
- if tid == "" {
- return trace.TraceID{}, false
- }
- b, err := hex.DecodeString(tid)
- if err != nil || len(b) > 16 {
- return trace.TraceID{}, false
- }
- var traceID trace.TraceID
- if len(b) <= 8 {
- // The lower 64-bits.
- start := 8 + (8 - len(b))
- copy(traceID[start:], b)
- } else {
- start := 16 - len(b)
- copy(traceID[start:], b)
- }
-
- return traceID, true
-}
-
-// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
-func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
- if sid == "" {
- return trace.SpanID{}, false
- }
- b, err := hex.DecodeString(sid)
- if err != nil || len(b) > 8 {
- return trace.SpanID{}, false
- }
- start := 8 - len(b)
- copy(spanID[start:], b)
- return spanID, true
-}
-
-// ParseSampled parses the value of the X-B3-Sampled header.
-func ParseSampled(sampled string) (trace.TraceOptions, bool) {
- switch sampled {
- case "true", "1":
- return trace.TraceOptions(1), true
- default:
- return trace.TraceOptions(0), false
- }
-}
-
-// SpanContextToRequest modifies the given request to include B3 headers.
-func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
- req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
-
- var sampled string
- if sc.IsSampled() {
- sampled = "1"
- } else {
- sampled = "0"
- }
- req.Header.Set(SampledHeader, sampled)
-}
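A round-trip sketch of the B3 format: inject a span context into an outgoing request, then extract it again as a server would. The trace and span IDs are arbitrary placeholders.

```go
package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/b3"
	"go.opencensus.io/trace"
)

func main() {
	f := &b3.HTTPFormat{}
	sc := trace.SpanContext{
		TraceID:      trace.TraceID{0x01, 0x02}, // remaining bytes are zero
		SpanID:       trace.SpanID{0x03, 0x04},
		TraceOptions: trace.TraceOptions(1), // sampled
	}

	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	f.SpanContextToRequest(sc, req)
	fmt.Println(req.Header.Get(b3.TraceIDHeader)) // 01020000000000000000000000000000
	fmt.Println(req.Header.Get(b3.SampledHeader)) // 1

	// Extract it back, as the receiving server would.
	got, ok := f.SpanContextFromRequest(req)
	fmt.Println(ok, got.TraceID == sc.TraceID) // true true
}
```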
diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go
deleted file mode 100644
index 5e6a34307..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/route.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "net/http"
-
- "go.opencensus.io/tag"
-)
-
-// SetRoute sets the http_server_route tag to the given value.
-// It's useful when an HTTP framework does not support the http.Handler
-// interface and using WithRouteTag is not an option, but it still provides
-// a way to hook into the request flow.
-func SetRoute(ctx context.Context, route string) {
- if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok {
- a.t = append(a.t, tag.Upsert(KeyServerRoute, route))
- }
-}
-
-// WithRouteTag returns an http.Handler that records stats with the
-// http_server_route tag set to the given value.
-func WithRouteTag(handler http.Handler, route string) http.Handler {
- return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
- addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
- ctx, _ := tag.New(r.Context(), addRoute...)
- r = r.WithContext(ctx)
- handler.ServeHTTP(w, r)
- return addRoute
- })
-}
-
-// taggedHandlerFunc is an http.Handler that returns tags describing the
-// processing of the request. These tags will be recorded along with the
-// measures in this package at the end of the request.
-type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
-
-func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- tags := h(w, r)
- if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
- a.t = append(a.t, tags...)
- }
-}
-
-type addedTagsKey struct{}
-
-type addedTags struct {
- t []tag.Mutator
-}
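A usage sketch combining `WithRouteTag` with the `ochttp.Handler` defined in server.go below, so per-route stats are recorded; the route and listen address are placeholders.

```go
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	mux := http.NewServeMux()
	mux.Handle("/users", ochttp.WithRouteTag(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("ok"))
		}),
		"/users", // recorded as the http_server_route tag
	))

	// Wrap the mux in ochttp.Handler so the stats middleware runs.
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}
```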
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
deleted file mode 100644
index f7c8434be..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/server.go
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "context"
- "io"
- "net/http"
- "strconv"
- "sync"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// Handler is an http.Handler wrapper to instrument your HTTP server with
-// OpenCensus. It supports both stats and tracing.
-//
-// # Tracing
-//
-// This handler is aware of the incoming request's span, reading it from request
-// headers as configured using the Propagation field.
-// The extracted span can be accessed from the incoming request's
-// context.
-//
-// span := trace.FromContext(r.Context())
-//
-// The server span will be automatically ended at the end of ServeHTTP.
-type Handler struct {
- // Propagation defines how traces are propagated. If unspecified,
- // B3 propagation will be used.
- Propagation propagation.HTTPFormat
-
- // Handler is the handler used to handle the incoming request.
- Handler http.Handler
-
- // StartOptions are applied to the span started by this Handler around each
- // request.
- //
- // StartOptions.SpanKind will always be set to trace.SpanKindServer
- // for spans started by this transport.
- StartOptions trace.StartOptions
-
- // GetStartOptions allows setting start options per request. If set,
- // StartOptions is ignored.
- GetStartOptions func(*http.Request) trace.StartOptions
-
- // IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
- // servers. If true, any trace metadata set on the incoming request will
- // be added as a linked trace instead of being added as a parent of the
- // current trace.
- IsPublicEndpoint bool
-
- // FormatSpanName holds the function to use for generating the span name
- // from the information found in the incoming HTTP Request. By default the
- // name equals the URL Path.
- FormatSpanName func(*http.Request) string
-
- // IsHealthEndpoint holds the function to use for determining if the
- // incoming HTTP request should be considered a health check. This is in
- // addition to the private isHealthEndpoint func which may also indicate
- // tracing should be skipped.
- IsHealthEndpoint func(*http.Request) bool
-}
-
-func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- var tags addedTags
- r, traceEnd := h.startTrace(w, r)
- defer traceEnd()
- w, statsEnd := h.startStats(w, r)
- defer statsEnd(&tags)
- handler := h.Handler
- if handler == nil {
- handler = http.DefaultServeMux
- }
- r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
- handler.ServeHTTP(w, r)
-}
-
-func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
- if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) {
- return r, func() {}
- }
- var name string
- if h.FormatSpanName == nil {
- name = spanNameFromURL(r)
- } else {
- name = h.FormatSpanName(r)
- }
- ctx := r.Context()
-
- startOpts := h.StartOptions
- if h.GetStartOptions != nil {
- startOpts = h.GetStartOptions(r)
- }
-
- var span *trace.Span
- sc, ok := h.extractSpanContext(r)
- if ok && !h.IsPublicEndpoint {
- ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
- trace.WithSampler(startOpts.Sampler),
- trace.WithSpanKind(trace.SpanKindServer))
- } else {
- ctx, span = trace.StartSpan(ctx, name,
- trace.WithSampler(startOpts.Sampler),
- trace.WithSpanKind(trace.SpanKindServer),
- )
- if ok {
- span.AddLink(trace.Link{
- TraceID: sc.TraceID,
- SpanID: sc.SpanID,
- Type: trace.LinkTypeParent,
- Attributes: nil,
- })
- }
- }
- span.AddAttributes(requestAttrs(r)...)
- if r.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- } else if r.ContentLength > 0 {
- span.AddMessageReceiveEvent(0, /* TODO: messageID */
- r.ContentLength, -1)
- }
- return r.WithContext(ctx), span.End
-}
-
-func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
- if h.Propagation == nil {
- return defaultFormat.SpanContextFromRequest(r)
- }
- return h.Propagation.SpanContextFromRequest(r)
-}
-
-func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
- ctx, _ := tag.New(r.Context(),
- tag.Upsert(Host, r.Host),
- tag.Upsert(Path, r.URL.Path),
- tag.Upsert(Method, r.Method))
- track := &trackingResponseWriter{
- start: time.Now(),
- ctx: ctx,
- writer: w,
- }
- if r.Body == nil {
- // TODO: Handle cases where ContentLength is not set.
- track.reqSize = -1
- } else if r.ContentLength > 0 {
- track.reqSize = r.ContentLength
- }
- stats.Record(ctx, ServerRequestCount.M(1))
- return track.wrappedResponseWriter(), track.end
-}
-
-type trackingResponseWriter struct {
- ctx context.Context
- reqSize int64
- respSize int64
- start time.Time
- statusCode int
- statusLine string
- endOnce sync.Once
- writer http.ResponseWriter
-}
-
-// Compile time assertion for ResponseWriter interface
-var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
-
-func (t *trackingResponseWriter) end(tags *addedTags) {
- t.endOnce.Do(func() {
- if t.statusCode == 0 {
- t.statusCode = 200
- }
-
- span := trace.FromContext(t.ctx)
- span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
- span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
-
- m := []stats.Measurement{
- ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
- ServerResponseBytes.M(t.respSize),
- }
- if t.reqSize >= 0 {
- m = append(m, ServerRequestBytes.M(t.reqSize))
- }
- allTags := make([]tag.Mutator, len(tags.t)+1)
- allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
- copy(allTags[1:], tags.t)
- stats.RecordWithTags(t.ctx, allTags, m...)
- })
-}
-
-func (t *trackingResponseWriter) Header() http.Header {
- return t.writer.Header()
-}
-
-func (t *trackingResponseWriter) Write(data []byte) (int, error) {
- n, err := t.writer.Write(data)
- t.respSize += int64(n)
- // Add message event for the response bytes sent.
- span := trace.FromContext(t.ctx)
- span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
- return n, err
-}
-
-func (t *trackingResponseWriter) WriteHeader(statusCode int) {
- t.writer.WriteHeader(statusCode)
- t.statusCode = statusCode
- t.statusLine = http.StatusText(t.statusCode)
-}
-
-// wrappedResponseWriter returns a wrapped version of the original
-// ResponseWriter and only implements the same combination of additional
-// interfaces as the original.
-// This implementation is based on https://github.com/felixge/httpsnoop.
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
- var (
- hj, i0 = t.writer.(http.Hijacker)
- cn, i1 = t.writer.(http.CloseNotifier)
- pu, i2 = t.writer.(http.Pusher)
- fl, i3 = t.writer.(http.Flusher)
- rf, i4 = t.writer.(io.ReaderFrom)
- )
-
- switch {
- case !i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- }{t}
- case !i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- io.ReaderFrom
- }{t, rf}
- case !i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Flusher
- }{t, fl}
- case !i0 && !i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- }{t, fl, rf}
- case !i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- }{t, pu}
- case !i0 && !i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- io.ReaderFrom
- }{t, pu, rf}
- case !i0 && !i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- }{t, pu, fl}
- case !i0 && !i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, pu, fl, rf}
- case !i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- }{t, cn}
- case !i0 && i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- }{t, cn, rf}
- case !i0 && i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Flusher
- }{t, cn, fl}
- case !i0 && i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Flusher
- io.ReaderFrom
- }{t, cn, fl, rf}
- case !i0 && i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- }{t, cn, pu}
- case !i0 && i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- io.ReaderFrom
- }{t, cn, pu, rf}
- case !i0 && i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- http.Flusher
- }{t, cn, pu, fl}
- case !i0 && i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, cn, pu, fl, rf}
- case i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- }{t, hj}
- case i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- }{t, hj, rf}
- case i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Flusher
- }{t, hj, fl}
- case i0 && !i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Flusher
- io.ReaderFrom
- }{t, hj, fl, rf}
- case i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- }{t, hj, pu}
- case i0 && !i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- io.ReaderFrom
- }{t, hj, pu, rf}
- case i0 && !i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- http.Flusher
- }{t, hj, pu, fl}
- case i0 && !i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, hj, pu, fl, rf}
- case i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- }{t, hj, cn}
- case i0 && i1 && !i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- io.ReaderFrom
- }{t, hj, cn, rf}
- case i0 && i1 && !i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Flusher
- }{t, hj, cn, fl}
- case i0 && i1 && !i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Flusher
- io.ReaderFrom
- }{t, hj, cn, fl, rf}
- case i0 && i1 && i2 && !i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- }{t, hj, cn, pu}
- case i0 && i1 && i2 && !i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- io.ReaderFrom
- }{t, hj, cn, pu, rf}
- case i0 && i1 && i2 && i3 && !i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- http.Flusher
- }{t, hj, cn, pu, fl}
- case i0 && i1 && i2 && i3 && i4:
- return struct {
- http.ResponseWriter
- http.Hijacker
- http.CloseNotifier
- http.Pusher
- http.Flusher
- io.ReaderFrom
- }{t, hj, cn, pu, fl, rf}
- default:
- return struct {
- http.ResponseWriter
- }{t}
- }
-}
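
The deleted wrappedResponseWriter above enumerates all 32 combinations of the five
optional interfaces so that type assertions made by other middleware or by net/http
itself (for example http.Flusher for streaming) still succeed after wrapping. A
minimal sketch of the same idea for a single optional interface, using a hypothetical
statusRecorder wrapper that is not part of this package:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// statusRecorder is an illustrative wrapper that records the status code.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (s *statusRecorder) WriteHeader(code int) {
	s.status = code
	s.ResponseWriter.WriteHeader(code)
}

// wrap preserves http.Flusher only when the underlying writer supports it,
// mirroring one branch of the combination switch above.
func wrap(w http.ResponseWriter) (http.ResponseWriter, *statusRecorder) {
	rec := &statusRecorder{ResponseWriter: w}
	if fl, ok := w.(http.Flusher); ok {
		return struct {
			http.ResponseWriter
			http.Flusher
		}{rec, fl}, rec
	}
	return rec, rec
}

func main() {
	w, rec := wrap(httptest.NewRecorder())
	_, isFlusher := w.(http.Flusher) // true: httptest.ResponseRecorder has Flush
	w.WriteHeader(http.StatusTeapot)
	fmt.Println(isFlusher, rec.status) // true 418
}
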
diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
deleted file mode 100644
index 05c6c56cc..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "crypto/tls"
- "net/http"
- "net/http/httptrace"
- "strings"
-
- "go.opencensus.io/trace"
-)
-
-type spanAnnotator struct {
- sp *trace.Span
-}
-
-// TODO: Remove NewSpanAnnotator at the next release.
-
-// NewSpanAnnotator returns a httptrace.ClientTrace which annotates
-// all emitted httptrace events on the provided Span.
-// Deprecated: Use NewSpanAnnotatingClientTrace instead
-func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace {
- return NewSpanAnnotatingClientTrace(r, s)
-}
-
-// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates
-// all emitted httptrace events on the provided Span.
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace {
- sa := spanAnnotator{sp: s}
-
- return &httptrace.ClientTrace{
- GetConn: sa.getConn,
- GotConn: sa.gotConn,
- PutIdleConn: sa.putIdleConn,
- GotFirstResponseByte: sa.gotFirstResponseByte,
- Got100Continue: sa.got100Continue,
- DNSStart: sa.dnsStart,
- DNSDone: sa.dnsDone,
- ConnectStart: sa.connectStart,
- ConnectDone: sa.connectDone,
- TLSHandshakeStart: sa.tlsHandshakeStart,
- TLSHandshakeDone: sa.tlsHandshakeDone,
- WroteHeaders: sa.wroteHeaders,
- Wait100Continue: sa.wait100Continue,
- WroteRequest: sa.wroteRequest,
- }
-}
-
-func (s spanAnnotator) getConn(hostPort string) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.get_connection.host_port", hostPort),
- }
- s.sp.Annotate(attrs, "GetConn")
-}
-
-func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) {
- attrs := []trace.Attribute{
- trace.BoolAttribute("httptrace.got_connection.reused", info.Reused),
- trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle),
- }
- if info.WasIdle {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String()))
- }
- s.sp.Annotate(attrs, "GotConn")
-}
-
-// PutIdleConn implements a httptrace.ClientTrace hook
-func (s spanAnnotator) putIdleConn(err error) {
- var attrs []trace.Attribute
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.put_idle_connection.error", err.Error()))
- }
- s.sp.Annotate(attrs, "PutIdleConn")
-}
-
-func (s spanAnnotator) gotFirstResponseByte() {
- s.sp.Annotate(nil, "GotFirstResponseByte")
-}
-
-func (s spanAnnotator) got100Continue() {
- s.sp.Annotate(nil, "Got100Continue")
-}
-
-func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.dns_start.host", info.Host),
- }
- s.sp.Annotate(attrs, "DNSStart")
-}
-
-func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) {
- var addrs []string
- for _, addr := range info.Addrs {
- addrs = append(addrs, addr.String())
- }
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")),
- }
- if info.Err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.dns_done.error", info.Err.Error()))
- }
- s.sp.Annotate(attrs, "DNSDone")
-}
-
-func (s spanAnnotator) connectStart(network, addr string) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.connect_start.network", network),
- trace.StringAttribute("httptrace.connect_start.addr", addr),
- }
- s.sp.Annotate(attrs, "ConnectStart")
-}
-
-func (s spanAnnotator) connectDone(network, addr string, err error) {
- attrs := []trace.Attribute{
- trace.StringAttribute("httptrace.connect_done.network", network),
- trace.StringAttribute("httptrace.connect_done.addr", addr),
- }
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.connect_done.error", err.Error()))
- }
- s.sp.Annotate(attrs, "ConnectDone")
-}
-
-func (s spanAnnotator) tlsHandshakeStart() {
- s.sp.Annotate(nil, "TLSHandshakeStart")
-}
-
-func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) {
- var attrs []trace.Attribute
- if err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error()))
- }
- s.sp.Annotate(attrs, "TLSHandshakeDone")
-}
-
-func (s spanAnnotator) wroteHeaders() {
- s.sp.Annotate(nil, "WroteHeaders")
-}
-
-func (s spanAnnotator) wait100Continue() {
- s.sp.Annotate(nil, "Wait100Continue")
-}
-
-func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) {
- var attrs []trace.Attribute
- if info.Err != nil {
- attrs = append(attrs,
- trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error()))
- }
- s.sp.Annotate(attrs, "WroteRequest")
-}
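
Although the ochttp client transport wires this trace up automatically, the deleted
constructor can also be attached to a single outgoing request by hand. A minimal
sketch, assuming the ochttp and trace packages are still vendored; example.com is a
placeholder URL:

package main

import (
	"context"
	"net/http"
	"net/http/httptrace"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	ctx, span := trace.StartSpan(context.Background(), "manual-request")
	defer span.End()

	req, _ := http.NewRequest("GET", "https://example.com", nil)
	// Annotate the span with low-level httptrace events (DNS, connect, TLS, ...).
	ct := ochttp.NewSpanAnnotatingClientTrace(req, span)
	req = req.WithContext(httptrace.WithClientTrace(ctx, ct))

	if resp, err := http.DefaultClient.Do(req); err == nil {
		resp.Body.Close()
	}
}
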
diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go
deleted file mode 100644
index ee3729040..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/stats.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
-)
-
-// Deprecated: client HTTP measures.
-var (
- // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect.
- ClientRequestCount = stats.Int64(
- "opencensus.io/http/client/request_count",
- "Number of HTTP requests started",
- stats.UnitDimensionless)
- // Deprecated: Use ClientSentBytes.
- ClientRequestBytes = stats.Int64(
- "opencensus.io/http/client/request_bytes",
- "HTTP request body size if set as ContentLength (uncompressed)",
- stats.UnitBytes)
- // Deprecated: Use ClientReceivedBytes.
- ClientResponseBytes = stats.Int64(
- "opencensus.io/http/client/response_bytes",
- "HTTP response body size (uncompressed)",
- stats.UnitBytes)
- // Deprecated: Use ClientRoundtripLatency.
- ClientLatency = stats.Float64(
- "opencensus.io/http/client/latency",
- "End-to-end latency",
- stats.UnitMilliseconds)
-)
-
-// The following client HTTP measures are supported for use in custom views.
-var (
- ClientSentBytes = stats.Int64(
- "opencensus.io/http/client/sent_bytes",
- "Total bytes sent in request body (not including headers)",
- stats.UnitBytes,
- )
- ClientReceivedBytes = stats.Int64(
- "opencensus.io/http/client/received_bytes",
- "Total bytes received in response bodies (not including headers but including error responses with bodies)",
- stats.UnitBytes,
- )
- ClientRoundtripLatency = stats.Float64(
- "opencensus.io/http/client/roundtrip_latency",
- "Time between first byte of request headers sent to last byte of response received, or terminal error",
- stats.UnitMilliseconds,
- )
-)
-
-// The following server HTTP measures are supported for use in custom views:
-var (
- ServerRequestCount = stats.Int64(
- "opencensus.io/http/server/request_count",
- "Number of HTTP requests started",
- stats.UnitDimensionless)
- ServerRequestBytes = stats.Int64(
- "opencensus.io/http/server/request_bytes",
- "HTTP request body size if set as ContentLength (uncompressed)",
- stats.UnitBytes)
- ServerResponseBytes = stats.Int64(
- "opencensus.io/http/server/response_bytes",
- "HTTP response body size (uncompressed)",
- stats.UnitBytes)
- ServerLatency = stats.Float64(
- "opencensus.io/http/server/latency",
- "End-to-end latency",
- stats.UnitMilliseconds)
-)
-
-// The following tags are applied to stats recorded by this package. Host, Path
-// and Method are applied to all measures. StatusCode is not applied to
-// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
-var (
- // Host is the value of the HTTP Host header.
- //
- // The value of this tag can be controlled by the HTTP client, so you need
- // to watch out for potentially generating high-cardinality labels in your
- // metrics backend if you use this tag in views.
- Host = tag.MustNewKey("http.host")
-
- // StatusCode is the numeric HTTP response status code,
- // or "error" if a transport error occurred and no status code was read.
- StatusCode = tag.MustNewKey("http.status")
-
- // Path is the URL path (not including query string) in the request.
- //
- // The value of this tag can be controlled by the HTTP client, so you need
- // to watch out for potentially generating high-cardinality labels in your
- // metrics backend if you use this tag in views.
- Path = tag.MustNewKey("http.path")
-
- // Method is the HTTP method of the request, capitalized (GET, POST, etc.).
- Method = tag.MustNewKey("http.method")
-
- // KeyServerRoute is a low cardinality string representing the logical
- // handler of the request. This is usually the pattern registered on a
- // ServeMux (or similar string).
- KeyServerRoute = tag.MustNewKey("http_server_route")
-)
-
-// Client tag keys.
-var (
- // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.).
- KeyClientMethod = tag.MustNewKey("http_client_method")
- // KeyClientPath is the URL path (not including query string).
- KeyClientPath = tag.MustNewKey("http_client_path")
- // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received.
- KeyClientStatus = tag.MustNewKey("http_client_status")
- // KeyClientHost is the value of the request Host header.
- KeyClientHost = tag.MustNewKey("http_client_host")
-)
-
-// Default distributions used by views in this package.
-var (
- DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
- DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
-)
-
-// Package ochttp provides some convenience views for client measures.
-// You still need to register these views for data to actually be collected.
-var (
- ClientSentBytesDistribution = &view.View{
- Name: "opencensus.io/http/client/sent_bytes",
- Measure: ClientSentBytes,
- Aggregation: DefaultSizeDistribution,
- Description: "Total bytes sent in request body (not including headers), by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-
- ClientReceivedBytesDistribution = &view.View{
- Name: "opencensus.io/http/client/received_bytes",
- Measure: ClientReceivedBytes,
- Aggregation: DefaultSizeDistribution,
- Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-
- ClientRoundtripLatencyDistribution = &view.View{
- Name: "opencensus.io/http/client/roundtrip_latency",
- Measure: ClientRoundtripLatency,
- Aggregation: DefaultLatencyDistribution,
- Description: "End-to-end latency, by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-
- ClientCompletedCount = &view.View{
- Name: "opencensus.io/http/client/completed_count",
- Measure: ClientRoundtripLatency,
- Aggregation: view.Count(),
- Description: "Count of completed requests, by HTTP method and response status",
- TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus},
- }
-)
-
-// Deprecated: Old client Views.
-var (
- // Deprecated: No direct replacement, but see ClientCompletedCount.
- ClientRequestCountView = &view.View{
- Name: "opencensus.io/http/client/request_count",
- Description: "Count of HTTP requests started",
- Measure: ClientRequestCount,
- Aggregation: view.Count(),
- }
-
- // Deprecated: Use ClientSentBytesDistribution.
- ClientRequestBytesView = &view.View{
- Name: "opencensus.io/http/client/request_bytes",
- Description: "Size distribution of HTTP request body",
- Measure: ClientSentBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- // Deprecated: Use ClientReceivedBytesDistribution instead.
- ClientResponseBytesView = &view.View{
- Name: "opencensus.io/http/client/response_bytes",
- Description: "Size distribution of HTTP response body",
- Measure: ClientReceivedBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- // Deprecated: Use ClientRoundtripLatencyDistribution instead.
- ClientLatencyView = &view.View{
- Name: "opencensus.io/http/client/latency",
- Description: "Latency distribution of HTTP requests",
- Measure: ClientRoundtripLatency,
- Aggregation: DefaultLatencyDistribution,
- }
-
- // Deprecated: Use ClientCompletedCount instead.
- ClientRequestCountByMethod = &view.View{
- Name: "opencensus.io/http/client/request_count_by_method",
- Description: "Client request count by HTTP method",
- TagKeys: []tag.Key{Method},
- Measure: ClientSentBytes,
- Aggregation: view.Count(),
- }
-
- // Deprecated: Use ClientCompletedCount instead.
- ClientResponseCountByStatusCode = &view.View{
- Name: "opencensus.io/http/client/response_count_by_status_code",
- Description: "Client response count by status code",
- TagKeys: []tag.Key{StatusCode},
- Measure: ClientRoundtripLatency,
- Aggregation: view.Count(),
- }
-)
-
-// Package ochttp provides some convenience views for server measures.
-// You still need to register these views for data to actually be collected.
-var (
- ServerRequestCountView = &view.View{
- Name: "opencensus.io/http/server/request_count",
- Description: "Count of HTTP requests started",
- Measure: ServerRequestCount,
- Aggregation: view.Count(),
- }
-
- ServerRequestBytesView = &view.View{
- Name: "opencensus.io/http/server/request_bytes",
- Description: "Size distribution of HTTP request body",
- Measure: ServerRequestBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ServerResponseBytesView = &view.View{
- Name: "opencensus.io/http/server/response_bytes",
- Description: "Size distribution of HTTP response body",
- Measure: ServerResponseBytes,
- Aggregation: DefaultSizeDistribution,
- }
-
- ServerLatencyView = &view.View{
- Name: "opencensus.io/http/server/latency",
- Description: "Latency distribution of HTTP requests",
- Measure: ServerLatency,
- Aggregation: DefaultLatencyDistribution,
- }
-
- ServerRequestCountByMethod = &view.View{
- Name: "opencensus.io/http/server/request_count_by_method",
- Description: "Server request count by HTTP method",
- TagKeys: []tag.Key{Method},
- Measure: ServerRequestCount,
- Aggregation: view.Count(),
- }
-
- ServerResponseCountByStatusCode = &view.View{
- Name: "opencensus.io/http/server/response_count_by_status_code",
- Description: "Server response count by status code",
- TagKeys: []tag.Key{StatusCode},
- Measure: ServerLatency,
- Aggregation: view.Count(),
- }
-)
-
-// DefaultClientViews are the default client views provided by this package.
-// Deprecated: No replacement. Register the views you would like individually.
-var DefaultClientViews = []*view.View{
- ClientRequestCountView,
- ClientRequestBytesView,
- ClientResponseBytesView,
- ClientLatencyView,
- ClientRequestCountByMethod,
- ClientResponseCountByStatusCode,
-}
-
-// DefaultServerViews are the default server views provided by this package.
-// Deprecated: No replacement. Register the views you would like individually.
-var DefaultServerViews = []*view.View{
- ServerRequestCountView,
- ServerRequestBytesView,
- ServerResponseBytesView,
- ServerLatencyView,
- ServerRequestCountByMethod,
- ServerResponseCountByStatusCode,
-}
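
None of the views above are registered automatically; a consumer had to register the
ones it wanted before any data was collected. A short sketch using the
non-deprecated client views defined in this file:

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register only the views you need; measurements for unregistered
	// views are dropped cheaply.
	if err := view.Register(
		ochttp.ClientSentBytesDistribution,
		ochttp.ClientReceivedBytesDistribution,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ClientCompletedCount,
	); err != nil {
		log.Fatalf("failed to register ochttp client views: %v", err)
	}
}
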
diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go
deleted file mode 100644
index ed3a5db56..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/trace.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "io"
- "net/http"
- "net/http/httptrace"
-
- "go.opencensus.io/plugin/ochttp/propagation/b3"
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-// TODO(jbd): Add godoc examples.
-
-var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{}
-
-// Attributes recorded on the span for the requests.
-// Only trace exporters will need them.
-const (
- HostAttribute = "http.host"
- MethodAttribute = "http.method"
- PathAttribute = "http.path"
- URLAttribute = "http.url"
- UserAgentAttribute = "http.user_agent"
- StatusCodeAttribute = "http.status_code"
-)
-
-type traceTransport struct {
- base http.RoundTripper
- startOptions trace.StartOptions
- format propagation.HTTPFormat
- formatSpanName func(*http.Request) string
- newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
-}
-
-// TODO(jbd): Add message events for request and response size.
-
-// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers.
-// The created span can follow a parent span, if a parent is present in
-// the request's context.
-func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- name := t.formatSpanName(req)
- // TODO(jbd): Discuss whether we want to prefix
- // outgoing requests with Sent.
- ctx, span := trace.StartSpan(req.Context(), name,
- trace.WithSampler(t.startOptions.Sampler),
- trace.WithSpanKind(trace.SpanKindClient))
-
- if t.newClientTrace != nil {
- req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span)))
- } else {
- req = req.WithContext(ctx)
- }
-
- if t.format != nil {
- // SpanContextToRequest will modify its Request argument, which is
- // contrary to the contract for http.RoundTripper, so we need to
- // pass it a copy of the Request.
- // However, the Request struct itself was already copied by
- // the WithContext calls above and so we just need to copy the header.
- header := make(http.Header)
- for k, v := range req.Header {
- header[k] = v
- }
- req.Header = header
- t.format.SpanContextToRequest(span.SpanContext(), req)
- }
-
- span.AddAttributes(requestAttrs(req)...)
- resp, err := t.base.RoundTrip(req)
- if err != nil {
- span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
- span.End()
- return resp, err
- }
-
- span.AddAttributes(responseAttrs(resp)...)
- span.SetStatus(TraceStatus(resp.StatusCode, resp.Status))
-
- // span.End() will be invoked after
- // a read from resp.Body returns io.EOF or when
- // resp.Body.Close() is invoked.
- bt := &bodyTracker{rc: resp.Body, span: span}
- resp.Body = wrappedBody(bt, resp.Body)
- return resp, err
-}
-
-// bodyTracker wraps a response Body and invokes
-// span.End on encountering io.EOF while reading
-// the body of the original response.
-type bodyTracker struct {
- rc io.ReadCloser
- span *trace.Span
-}
-
-var _ io.ReadCloser = (*bodyTracker)(nil)
-
-func (bt *bodyTracker) Read(b []byte) (int, error) {
- n, err := bt.rc.Read(b)
-
- switch err {
- case nil:
- return n, nil
- case io.EOF:
- bt.span.End()
- default:
- // For all other errors, set the span status
- bt.span.SetStatus(trace.Status{
- // Code 2 is the error code for Internal server error.
- Code: 2,
- Message: err.Error(),
- })
- }
- return n, err
-}
-
-func (bt *bodyTracker) Close() error {
- // Invoking span.End on Close helps catch the cases
- // in which a read returned a non-nil error and we set the
- // span status but didn't end the span.
- bt.span.End()
- return bt.rc.Close()
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *traceTransport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base.(canceler); ok {
- cr.CancelRequest(req)
- }
-}
-
-func spanNameFromURL(req *http.Request) string {
- return req.URL.Path
-}
-
-func requestAttrs(r *http.Request) []trace.Attribute {
- userAgent := r.UserAgent()
-
- attrs := make([]trace.Attribute, 0, 5)
- attrs = append(attrs,
- trace.StringAttribute(PathAttribute, r.URL.Path),
- trace.StringAttribute(URLAttribute, r.URL.String()),
- trace.StringAttribute(HostAttribute, r.Host),
- trace.StringAttribute(MethodAttribute, r.Method),
- )
-
- if userAgent != "" {
- attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent))
- }
-
- return attrs
-}
-
-func responseAttrs(resp *http.Response) []trace.Attribute {
- return []trace.Attribute{
- trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)),
- }
-}
-
-// TraceStatus is a utility to convert the HTTP status code to a trace.Status that
-// represents the outcome as closely as possible.
-func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
- var code int32
- if httpStatusCode < 200 || httpStatusCode >= 400 {
- code = trace.StatusCodeUnknown
- }
- switch httpStatusCode {
- case 499:
- code = trace.StatusCodeCancelled
- case http.StatusBadRequest:
- code = trace.StatusCodeInvalidArgument
- case http.StatusUnprocessableEntity:
- code = trace.StatusCodeInvalidArgument
- case http.StatusGatewayTimeout:
- code = trace.StatusCodeDeadlineExceeded
- case http.StatusNotFound:
- code = trace.StatusCodeNotFound
- case http.StatusForbidden:
- code = trace.StatusCodePermissionDenied
- case http.StatusUnauthorized: // 401 is actually unauthenticated.
- code = trace.StatusCodeUnauthenticated
- case http.StatusTooManyRequests:
- code = trace.StatusCodeResourceExhausted
- case http.StatusNotImplemented:
- code = trace.StatusCodeUnimplemented
- case http.StatusServiceUnavailable:
- code = trace.StatusCodeUnavailable
- case http.StatusOK:
- code = trace.StatusCodeOK
- case http.StatusConflict:
- code = trace.StatusCodeAlreadyExists
- }
-
- return trace.Status{Code: code, Message: codeToStr[code]}
-}
-
-var codeToStr = map[int32]string{
- trace.StatusCodeOK: `OK`,
- trace.StatusCodeCancelled: `CANCELLED`,
- trace.StatusCodeUnknown: `UNKNOWN`,
- trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`,
- trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`,
- trace.StatusCodeNotFound: `NOT_FOUND`,
- trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`,
- trace.StatusCodePermissionDenied: `PERMISSION_DENIED`,
- trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`,
- trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`,
- trace.StatusCodeAborted: `ABORTED`,
- trace.StatusCodeOutOfRange: `OUT_OF_RANGE`,
- trace.StatusCodeUnimplemented: `UNIMPLEMENTED`,
- trace.StatusCodeInternal: `INTERNAL`,
- trace.StatusCodeUnavailable: `UNAVAILABLE`,
- trace.StatusCodeDataLoss: `DATA_LOSS`,
- trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`,
-}
-
-func isHealthEndpoint(path string) bool {
- // Health checking is pretty frequent and
- // traces collected for health endpoints
- // can be extremely noisy and expensive.
- // Disable canonical health checking endpoints
- // like /healthz and /_ah/health for now.
- if path == "/healthz" || path == "/_ah/health" {
- return true
- }
- return false
-}
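
TraceStatus leaves the code at 0 (OK) for any 2xx/3xx status without an explicit
case, while any unlisted 4xx/5xx status keeps the UNKNOWN fallback set before the
switch. A small sketch of the mapping, assuming the package is still importable:

package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	fmt.Println(ochttp.TraceStatus(http.StatusNotFound, "Not Found")) // {5 NOT_FOUND}
	fmt.Println(ochttp.TraceStatus(http.StatusUnauthorized, ""))      // {16 UNAUTHENTICATED}
	// 502 has no explicit case, so it falls back to UNKNOWN (code 2).
	fmt.Println(ochttp.TraceStatus(http.StatusBadGateway, "Bad Gateway")) // {2 UNKNOWN}
}
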
diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
deleted file mode 100644
index 7d75cae2b..000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ochttp
-
-import (
- "io"
-)
-
-// wrappedBody returns a wrapped version of the original
-// Body and only implements the same combination of additional
-// interfaces as the original.
-func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser {
- var (
- wr, i0 = body.(io.Writer)
- )
- switch {
- case !i0:
- return struct {
- io.ReadCloser
- }{wrapper}
-
- case i0:
- return struct {
- io.ReadCloser
- io.Writer
- }{wrapper, wr}
- default:
- return struct {
- io.ReadCloser
- }{wrapper}
- }
-}
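
wrappedBody is the client-side analogue of wrappedResponseWriter: a response body
that also implements io.Writer (as the body of a successful CONNECT request does)
must keep that capability after being wrapped. A standalone sketch of the same
check, with a hypothetical rwBody type standing in for such a body:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// rwBody is an illustrative body that is both a ReadCloser and a Writer.
type rwBody struct {
	bytes.Buffer
}

func (b *rwBody) Close() error { return nil }

// wrapBody reproduces the deleted wrappedBody logic.
func wrapBody(wrapper, body io.ReadCloser) io.ReadCloser {
	if wr, ok := body.(io.Writer); ok {
		return struct {
			io.ReadCloser
			io.Writer
		}{wrapper, wr}
	}
	return struct{ io.ReadCloser }{wrapper}
}

func main() {
	wrapped := wrapBody(io.NopCloser(&bytes.Buffer{}), &rwBody{})
	_, canWrite := wrapped.(io.Writer)
	fmt.Println(canWrite) // true
}
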
diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go
deleted file mode 100644
index b1764e1d3..000000000
--- a/vendor/go.opencensus.io/resource/resource.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package resource provides functionality for resources, which capture
-// identifying information about the entities for which signals are exported.
-package resource
-
-import (
- "context"
- "fmt"
- "os"
- "regexp"
- "sort"
- "strconv"
- "strings"
-)
-
-// Environment variables used by FromEnv to decode a resource.
-const (
- EnvVarType = "OC_RESOURCE_TYPE"
- EnvVarLabels = "OC_RESOURCE_LABELS"
-)
-
-// Resource describes an entity about which identifying information and metadata is exposed.
-// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace.
-type Resource struct {
- Type string
- Labels map[string]string
-}
-
-// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
-func EncodeLabels(labels map[string]string) string {
- sortedKeys := make([]string, 0, len(labels))
- for k := range labels {
- sortedKeys = append(sortedKeys, k)
- }
- sort.Strings(sortedKeys)
-
- s := ""
- for i, k := range sortedKeys {
- if i > 0 {
- s += ","
- }
- s += k + "=" + strconv.Quote(labels[k])
- }
- return s
-}
-
-var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
-
-// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
-// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
-// Domain names and paths are accepted as label keys.
-// Most users will want to use FromEnv instead.
-func DecodeLabels(s string) (map[string]string, error) {
- m := map[string]string{}
- // Ensure a trailing comma, which allows us to keep the regex simpler
- s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
-
- for len(s) > 0 {
- match := labelRegex.FindStringSubmatch(s)
- if len(match) == 0 {
- return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
- }
- v := match[2]
- if v == "" {
- v = match[3]
- } else {
- var err error
- if v, err = strconv.Unquote(v); err != nil {
- return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
- }
- }
- m[match[1]] = v
-
- s = s[len(match[0]):]
- }
- return m, nil
-}
-
-// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
-// and OC_RESOURCE_LABELS environment variables.
-func FromEnv(context.Context) (*Resource, error) {
- res := &Resource{
- Type: strings.TrimSpace(os.Getenv(EnvVarType)),
- }
- labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
- if labels == "" {
- return res, nil
- }
- var err error
- if res.Labels, err = DecodeLabels(labels); err != nil {
- return nil, err
- }
- return res, nil
-}
-
-var _ Detector = FromEnv
-
-// merge resource information from b into a. In case of a collision, a takes precedence.
-func merge(a, b *Resource) *Resource {
- if a == nil {
- return b
- }
- if b == nil {
- return a
- }
- res := &Resource{
- Type: a.Type,
- Labels: map[string]string{},
- }
- if res.Type == "" {
- res.Type = b.Type
- }
- for k, v := range b.Labels {
- res.Labels[k] = v
- }
- // Labels from resource a overwrite labels from resource b.
- for k, v := range a.Labels {
- res.Labels[k] = v
- }
- return res
-}
-
-// Detector attempts to detect resource information.
-// If the detector cannot find resource information, the returned resource is nil but no
-// error is returned.
-// An error is only returned on unexpected failures.
-type Detector func(context.Context) (*Resource, error)
-
-// MultiDetector returns a Detector that calls all input detectors in order and
-// merges each result with the previous one. In case a type of label key is already set,
-// the first set value is takes precedence.
-// It returns on the first error that a sub-detector encounters.
-func MultiDetector(detectors ...Detector) Detector {
- return func(ctx context.Context) (*Resource, error) {
- return detectAll(ctx, detectors...)
- }
-}
-
-// detectAll calls all input detectors sequentially and merges each result with the previous one.
-// It returns on the first error that a sub-detector encounters.
-func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) {
- var res *Resource
- for _, d := range detectors {
- r, err := d(ctx)
- if err != nil {
- return nil, err
- }
- res = merge(res, r)
- }
- return res, nil
-}
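
A round-trip sketch of the label encoding consumed via OC_RESOURCE_LABELS, plus the
FromEnv detector; the environment values below are made up for illustration:

package main

import (
	"context"
	"fmt"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	labels := map[string]string{"k8s.cluster.name": "demo", "cloud.zone": "us-east-1b"}
	encoded := resource.EncodeLabels(labels)
	fmt.Println(encoded) // cloud.zone="us-east-1b",k8s.cluster.name="demo"

	decoded, err := resource.DecodeLabels(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded["k8s.cluster.name"]) // demo

	// FromEnv reads OC_RESOURCE_TYPE and OC_RESOURCE_LABELS.
	os.Setenv("OC_RESOURCE_TYPE", "k8s")
	os.Setenv("OC_RESOURCE_LABELS", encoded)
	res, _ := resource.FromEnv(context.Background())
	fmt.Println(res.Type, res.Labels["cloud.zone"]) // k8s us-east-1b
}
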
diff --git a/vendor/go.opencensus.io/resource/resourcekeys/const.go b/vendor/go.opencensus.io/resource/resourcekeys/const.go
deleted file mode 100644
index 1f2246662..000000000
--- a/vendor/go.opencensus.io/resource/resourcekeys/const.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package resourcekeys contains well known type and label keys for resources.
-package resourcekeys // import "go.opencensus.io/resource/resourcekeys"
-
-// Constants for Kubernetes resources.
-const (
- K8SType = "k8s"
-
- // A uniquely identifying name for the Kubernetes cluster. Kubernetes
- // does not have cluster names as an internal concept so this may be
- // set to any meaningful value within the environment. For example,
- // GKE clusters have a name which can be used for this label.
- K8SKeyClusterName = "k8s.cluster.name"
- K8SKeyNamespaceName = "k8s.namespace.name"
- K8SKeyPodName = "k8s.pod.name"
- K8SKeyDeploymentName = "k8s.deployment.name"
-)
-
-// Constants for Container resources.
-const (
- ContainerType = "container"
-
- // A uniquely identifying name for the Container.
- ContainerKeyName = "container.name"
- ContainerKeyImageName = "container.image.name"
- ContainerKeyImageTag = "container.image.tag"
-)
-
-// Constants for Cloud resources.
-const (
- CloudType = "cloud"
-
- CloudKeyProvider = "cloud.provider"
- CloudKeyAccountID = "cloud.account.id"
- CloudKeyRegion = "cloud.region"
- CloudKeyZone = "cloud.zone"
-
- // Cloud Providers
- CloudProviderAWS = "aws"
- CloudProviderGCP = "gcp"
- CloudProviderAZURE = "azure"
-)
-
-// Constants for Host resources.
-const (
- HostType = "host"
-
- // A uniquely identifying name for the host.
- HostKeyName = "host.name"
-
- // A hostname as returned by the 'hostname' command on host machine.
- HostKeyHostName = "host.hostname"
- HostKeyID = "host.id"
- HostKeyType = "host.type"
-)
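
These constants are plain canonical strings; a detector would typically use them as
the keys of a Resource. A minimal sketch with illustrative label values:

package main

import (
	"fmt"

	"go.opencensus.io/resource"
	"go.opencensus.io/resource/resourcekeys"
)

func main() {
	res := &resource.Resource{
		Type: resourcekeys.ContainerType,
		Labels: map[string]string{
			resourcekeys.ContainerKeyName:      "api",
			resourcekeys.ContainerKeyImageName: "example/api",
			resourcekeys.ContainerKeyImageTag:  "v1.2.3",
		},
	}
	fmt.Println(res.Type, res.Labels[resourcekeys.ContainerKeyName]) // container api
}
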
diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go
deleted file mode 100644
index 31477a464..000000000
--- a/vendor/go.opencensus.io/stats/doc.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-/*
-Package stats contains support for OpenCensus stats recording.
-
-OpenCensus allows users to create typed measures, record measurements,
-aggregate the collected data, and export the aggregated data.
-
-# Measures
-
-A measure represents a type of data point to be tracked and recorded.
-For example, latency, request Mb/s, and response Mb/s are measures
-to collect from a server.
-
-Measure constructors such as Int64 and Float64 automatically
-register the measure by the given name. Each registered measure needs
-to be unique by name. Measures also have a description and a unit.
-
-Libraries can define and export measures. Application authors can then
-create views and collect and break down measures by the tags they are
-interested in.
-
-# Recording measurements
-
-Measurement is a data point to be collected for a measure. For example,
-for a latency (ms) measure, 100 is a measurement that represents a 100ms
-latency event. Measurements are created from measures with
-the current context. Tags from the current context are recorded with the
-measurements if they are any.
-
-Recorded measurements are dropped immediately if no views are registered for them.
-There is usually no need to conditionally enable and disable
-recording to reduce cost. Recording of measurements is cheap.
-
-Libraries can always record measurements, and applications can later decide
-on which measurements they want to collect by registering views. This allows
-libraries to turn on the instrumentation by default.
-
-# Exemplars
-
-For a given recorded measurement, the associated exemplar is a diagnostic map
-that gives more information about the measurement.
-
-When aggregated using a Distribution aggregation, an exemplar is kept for each
-bucket in the Distribution. This allows you to easily find an example of a
-measurement that fell into each bucket.
-
-For example, if you also use the OpenCensus trace package and you
-record a measurement with a context that contains a sampled trace span,
-then the trace span will be added to the exemplar associated with the measurement.
-
-When exported to a supporting back end, you should be able to easily navigate
-to example traces that fell into each bucket in the Distribution.
-*/
-package stats // import "go.opencensus.io/stats"
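
The workflow the package comment describes (define a measure, register a view over
it, then record) looks like this in practice; the measure name, buckets, and view
name are illustrative:

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// An illustrative latency measure; prefix measure names with your own domain.
var latencyMs = stats.Float64("example.com/measure/latency",
	"RPC latency", stats.UnitMilliseconds)

func main() {
	// Nothing is collected until a view over the measure is registered.
	v := &view.View{
		Name:        "example.com/views/latency",
		Description: "Latency distribution",
		Measure:     latencyMs,
		Aggregation: view.Distribution(25, 100, 500, 2000),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}
	stats.Record(context.Background(), latencyMs.M(37.5))
}
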
diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go
deleted file mode 100644
index 436dc791f..000000000
--- a/vendor/go.opencensus.io/stats/internal/record.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "go.opencensus.io/tag"
-)
-
-// DefaultRecorder will be called for each Record call.
-var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
-
-// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but
-// avoids interface{} conversion.
-// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type,
-// but is interface{} here to avoid import loops
-var MeasurementRecorder interface{}
-
-// SubscriptionReporter reports when a view subscribed with a measure.
-var SubscriptionReporter func(measure string)
diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go
deleted file mode 100644
index 1ffd3cefc..000000000
--- a/vendor/go.opencensus.io/stats/measure.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// Measure represents a single numeric value to be tracked and recorded.
-// For example, latency, request bytes, and response bytes could be measures
-// to collect from a server.
-//
-// Measures by themselves have no outside effects. In order to be exported,
-// the measure needs to be used in a View. If no Views are defined over a
-// measure, there is very little cost in recording it.
-type Measure interface {
- // Name returns the name of this measure.
- //
- // Measure names are globally unique (among all libraries linked into your program).
- // We recommend prefixing the measure name with a domain name relevant to your
- // project or application.
- //
- // Measure names are never sent over the wire or exported to backends.
- // They are only used to create Views.
- Name() string
-
- // Description returns the human-readable description of this measure.
- Description() string
-
- // Unit returns the units for the values this measure takes on.
- //
- // Units are encoded according to the case-sensitive abbreviations from the
- // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
- Unit() string
-}
-
-// measureDescriptor is the untyped descriptor associated with each measure.
-// Int64Measure and Float64Measure wrap measureDescriptor to provide typed
-// recording APIs.
-// Two Measures with the same name will have the same measureDescriptor.
-type measureDescriptor struct {
- subs int32 // access atomically
-
- name string
- description string
- unit string
-}
-
-func (m *measureDescriptor) subscribe() {
- atomic.StoreInt32(&m.subs, 1)
-}
-
-func (m *measureDescriptor) subscribed() bool {
- return atomic.LoadInt32(&m.subs) == 1
-}
-
-var (
- mu sync.RWMutex
- measures = make(map[string]*measureDescriptor)
-)
-
-func registerMeasureHandle(name, desc, unit string) *measureDescriptor {
- mu.Lock()
- defer mu.Unlock()
-
- if stored, ok := measures[name]; ok {
- return stored
- }
- m := &measureDescriptor{
- name: name,
- description: desc,
- unit: unit,
- }
- measures[name] = m
- return m
-}
-
-// Measurement is the numeric value measured when recording stats. Each measure
-// provides methods to create measurements of their kind. For example, Int64Measure
-// provides M to convert an int64 into a measurement.
-type Measurement struct {
- v float64
- m Measure
- desc *measureDescriptor
-}
-
-// Value returns the value of the Measurement as a float64.
-func (m Measurement) Value() float64 {
- return m.v
-}
-
-// Measure returns the Measure from which this Measurement was created.
-func (m Measurement) Measure() Measure {
- return m.m
-}
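
Because registerMeasureHandle returns the stored descriptor on a name collision,
constructing two measures with the same name yields two handles backed by one
descriptor, as the comment above notes. A quick sketch with an illustrative name:

package main

import (
	"fmt"

	"go.opencensus.io/stats"
)

func main() {
	a := stats.Int64("example.com/m", "first registration wins", stats.UnitDimensionless)
	b := stats.Int64("example.com/m", "this description is ignored", stats.UnitDimensionless)
	// Both wrap the same underlying descriptor, so their metadata matches.
	fmt.Println(a.Description() == b.Description()) // true
	fmt.Println(a.M(1).Value(), b.M(2).Value())     // 1 2
}
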
diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go
deleted file mode 100644
index f02c1eda8..000000000
--- a/vendor/go.opencensus.io/stats/measure_float64.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Float64Measure is a measure for float64 values.
-type Float64Measure struct {
- desc *measureDescriptor
-}
-
-// M creates a new float64 measurement.
-// Use Record to record measurements.
-func (m *Float64Measure) M(v float64) Measurement {
- return Measurement{
- m: m,
- desc: m.desc,
- v: v,
- }
-}
-
-// Float64 creates a new measure for float64 values.
-//
-// See the documentation for interface Measure for more guidance on the
-// parameters of this function.
-func Float64(name, description, unit string) *Float64Measure {
- mi := registerMeasureHandle(name, description, unit)
- return &Float64Measure{mi}
-}
-
-// Name returns the name of the measure.
-func (m *Float64Measure) Name() string {
- return m.desc.name
-}
-
-// Description returns the description of the measure.
-func (m *Float64Measure) Description() string {
- return m.desc.description
-}
-
-// Unit returns the unit of the measure.
-func (m *Float64Measure) Unit() string {
- return m.desc.unit
-}
diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go
deleted file mode 100644
index d101d7973..000000000
--- a/vendor/go.opencensus.io/stats/measure_int64.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Int64Measure is a measure for int64 values.
-type Int64Measure struct {
- desc *measureDescriptor
-}
-
-// M creates a new int64 measurement.
-// Use Record to record measurements.
-func (m *Int64Measure) M(v int64) Measurement {
- return Measurement{
- m: m,
- desc: m.desc,
- v: float64(v),
- }
-}
-
-// Int64 creates a new measure for int64 values.
-//
-// See the documentation for interface Measure for more guidance on the
-// parameters of this function.
-func Int64(name, description, unit string) *Int64Measure {
- mi := registerMeasureHandle(name, description, unit)
- return &Int64Measure{mi}
-}
-
-// Name returns the name of the measure.
-func (m *Int64Measure) Name() string {
- return m.desc.name
-}
-
-// Description returns the description of the measure.
-func (m *Int64Measure) Description() string {
- return m.desc.description
-}
-
-// Unit returns the unit of the measure.
-func (m *Int64Measure) Unit() string {
- return m.desc.unit
-}
diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go
deleted file mode 100644
index 8b5b99803..000000000
--- a/vendor/go.opencensus.io/stats/record.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-import (
- "context"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-func init() {
- internal.SubscriptionReporter = func(measure string) {
- mu.Lock()
- measures[measure].subscribe()
- mu.Unlock()
- }
-}
-
-// Recorder provides an interface for exporting measurement information from
-// the static Record method by using the WithRecorder option.
-type Recorder interface {
- // Record records a set of measurements associated with the given tags and attachments.
- // The second argument is a `[]Measurement`.
- Record(*tag.Map, interface{}, map[string]interface{})
-}
-
-type recordOptions struct {
- attachments metricdata.Attachments
- mutators []tag.Mutator
- measurements []Measurement
- recorder Recorder
-}
-
-// WithAttachments applies provided exemplar attachments.
-func WithAttachments(attachments metricdata.Attachments) Options {
- return func(ro *recordOptions) {
- ro.attachments = attachments
- }
-}
-
-// WithTags applies provided tag mutators.
-func WithTags(mutators ...tag.Mutator) Options {
- return func(ro *recordOptions) {
- ro.mutators = mutators
- }
-}
-
-// WithMeasurements applies provided measurements.
-func WithMeasurements(measurements ...Measurement) Options {
- return func(ro *recordOptions) {
- ro.measurements = measurements
- }
-}
-
-// WithRecorder records the measurements to the specified `Recorder`, rather
-// than to the global metrics recorder.
-func WithRecorder(meter Recorder) Options {
- return func(ro *recordOptions) {
- ro.recorder = meter
- }
-}
-
-// Options apply changes to recordOptions.
-type Options func(*recordOptions)
-
-func createRecordOption(ros ...Options) *recordOptions {
- o := &recordOptions{}
- for _, ro := range ros {
- ro(o)
- }
- return o
-}
-
-type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{})
-
-// Record records one or multiple measurements with the same context at once.
-// If there are any tags in the context, measurements will be tagged with them.
-func Record(ctx context.Context, ms ...Measurement) {
- // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality
- // (RecordOptions) we can reduce some allocations to speed up this hot path
- if len(ms) == 0 {
- return
- }
- recorder := internal.MeasurementRecorder.(measurementRecorder)
- record := false
- for _, m := range ms {
- if m.desc.subscribed() {
- record = true
- break
- }
- }
- if !record {
- return
- }
- recorder(tag.FromContext(ctx), ms, nil)
- return
-}
-
-// RecordWithTags records one or multiple measurements at once.
-//
-// Measurements will be tagged with the tags in the context mutated by the mutators.
-// RecordWithTags is useful if you want to record with tag mutations but don't want
-// to propagate the mutations in the context.
-func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error {
- return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...))
-}
-
-// RecordWithOptions records measurements from the given options (if any) against context
-// and tags and attachments in the options (if any).
-// If there are any tags in the context, measurements will be tagged with them.
-func RecordWithOptions(ctx context.Context, ros ...Options) error {
- o := createRecordOption(ros...)
- if len(o.measurements) == 0 {
- return nil
- }
- recorder := internal.DefaultRecorder
- if o.recorder != nil {
- recorder = o.recorder.Record
- }
- if recorder == nil {
- return nil
- }
- record := false
- for _, m := range o.measurements {
- if m.desc.subscribed() {
- record = true
- break
- }
- }
- if !record {
- return nil
- }
- if len(o.mutators) > 0 {
- var err error
- if ctx, err = tag.New(ctx, o.mutators...); err != nil {
- return err
- }
- }
- recorder(tag.FromContext(ctx), o.measurements, o.attachments)
- return nil
-}
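
RecordWithTags is the convenience form for recording with tag mutations that should
not leak into the caller's context; RecordWithOptions is the general form. A sketch
with an illustrative measure and tag key:

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
)

var (
	latencyMs = stats.Float64("example.com/measure/latency",
		"RPC latency", stats.UnitMilliseconds)
	keyMethod = tag.MustNewKey("example_method")
)

func main() {
	ctx := context.Background()

	// The mutators apply only to this recording; ctx itself is unchanged.
	if err := stats.RecordWithTags(ctx,
		[]tag.Mutator{tag.Upsert(keyMethod, "GetUser")},
		latencyMs.M(12.8),
	); err != nil {
		log.Fatal(err)
	}

	// The same recording, spelled with the general options form.
	if err := stats.RecordWithOptions(ctx,
		stats.WithTags(tag.Upsert(keyMethod, "GetUser")),
		stats.WithMeasurements(latencyMs.M(12.8)),
	); err != nil {
		log.Fatal(err)
	}
}
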
diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go
deleted file mode 100644
index 736399652..000000000
--- a/vendor/go.opencensus.io/stats/units.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package stats
-
-// Units are encoded according to the case-sensitive abbreviations from the
-// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
-const (
- UnitNone = "1" // Deprecated: Use UnitDimensionless.
- UnitDimensionless = "1"
- UnitBytes = "By"
- UnitMilliseconds = "ms"
- UnitSeconds = "s"
-)
diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go
deleted file mode 100644
index 61f72d20d..000000000
--- a/vendor/go.opencensus.io/stats/view/aggregation.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import "time"
-
-// AggType represents the type of aggregation function used on a View.
-type AggType int
-
-// All available aggregation types.
-const (
- AggTypeNone AggType = iota // no aggregation; reserved for future use.
- AggTypeCount // the count aggregation, see Count.
- AggTypeSum // the sum aggregation, see Sum.
- AggTypeDistribution // the distribution aggregation, see Distribution.
- AggTypeLastValue // the last value aggregation, see LastValue.
-)
-
-func (t AggType) String() string {
- return aggTypeName[t]
-}
-
-var aggTypeName = map[AggType]string{
- AggTypeNone: "None",
- AggTypeCount: "Count",
- AggTypeSum: "Sum",
- AggTypeDistribution: "Distribution",
- AggTypeLastValue: "LastValue",
-}
-
-// Aggregation represents a data aggregation method. Use one of the functions:
-// Count, Sum, or Distribution to construct an Aggregation.
-type Aggregation struct {
- Type AggType // Type is the AggType of this Aggregation.
- Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution.
-
- newData func(time.Time) AggregationData
-}
-
-var (
- aggCount = &Aggregation{
- Type: AggTypeCount,
- newData: func(t time.Time) AggregationData {
- return &CountData{Start: t}
- },
- }
- aggSum = &Aggregation{
- Type: AggTypeSum,
- newData: func(t time.Time) AggregationData {
- return &SumData{Start: t}
- },
- }
-)
-
-// Count indicates that data collected and aggregated
-// with this method will be turned into a count value.
-// For example, total number of accepted requests can be
-// aggregated by using Count.
-func Count() *Aggregation {
- return aggCount
-}
-
-// Sum indicates that data collected and aggregated
-// with this method will be summed up.
-// For example, accumulated request bytes can be aggregated by using
-// Sum.
-func Sum() *Aggregation {
- return aggSum
-}
-
-// Distribution indicates that the desired aggregation is
-// a histogram distribution.
-//
-// A distribution aggregation may contain a histogram of the values in the
-// population. The bucket boundaries for that histogram are described
-// by the bounds. This defines len(bounds)+1 buckets.
-//
-// If len(bounds) >= 2 then the boundaries for bucket index i are:
-//
-// [-infinity, bounds[i]) for i = 0
-// [bounds[i-1], bounds[i]) for 0 < i < length
-// [bounds[i-1], +infinity) for i = length
-//
-// If len(bounds) is 0 then there is no histogram associated with the
-// distribution. There will be a single bucket with boundaries
-// (-infinity, +infinity).
-//
-// If len(bounds) is 1 then there are no finite buckets, and that single
-// element is the common boundary of the overflow and underflow buckets.
-func Distribution(bounds ...float64) *Aggregation {
- agg := &Aggregation{
- Type: AggTypeDistribution,
- Buckets: bounds,
- }
- agg.newData = func(t time.Time) AggregationData {
- return newDistributionData(agg, t)
- }
- return agg
-}
-
-// LastValue only reports the last value recorded using this
-// aggregation. All other measurements will be dropped.
-func LastValue() *Aggregation {
- return &Aggregation{
- Type: AggTypeLastValue,
- newData: func(_ time.Time) AggregationData {
- return &LastValueData{}
- },
- }
-}
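
A worked example of the bucket rules above: two bounds define three buckets.

    agg := view.Distribution(5, 10)
    // Buckets: [-inf, 5), [5, 10), [10, +inf);
    // a recorded value of 7 falls into the middle bucket.
    _ = agg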
diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go
deleted file mode 100644
index d93b52066..000000000
--- a/vendor/go.opencensus.io/stats/view/aggregation_data.go
+++ /dev/null
@@ -1,336 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "math"
- "time"
-
- "go.opencensus.io/metric/metricdata"
-)
-
-// AggregationData represents an aggregated value from a collection.
-// They are reported on the view data during exporting.
-// Most users won't directly access aggregation data.
-type AggregationData interface {
- isAggregationData() bool
- addSample(v float64, attachments map[string]interface{}, t time.Time)
- clone() AggregationData
- equal(other AggregationData) bool
- toPoint(t metricdata.Type, time time.Time) metricdata.Point
- StartTime() time.Time
-}
-
-const epsilon = 1e-9
-
-// CountData is the aggregated data for the Count aggregation.
-// A count aggregation processes data and counts the recordings.
-//
-// Most users won't directly access count data.
-type CountData struct {
- Start time.Time
- Value int64
-}
-
-func (a *CountData) isAggregationData() bool { return true }
-
-func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) {
- a.Value = a.Value + 1
-}
-
-func (a *CountData) clone() AggregationData {
- return &CountData{Value: a.Value, Start: a.Start}
-}
-
-func (a *CountData) equal(other AggregationData) bool {
- a2, ok := other.(*CountData)
- if !ok {
- return false
- }
-
- return a.Start.Equal(a2.Start) && a.Value == a2.Value
-}
-
-func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeCumulativeInt64:
- return metricdata.NewInt64Point(t, a.Value)
- default:
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns the start time of the data being aggregated by CountData.
-func (a *CountData) StartTime() time.Time {
- return a.Start
-}
-
-// SumData is the aggregated data for the Sum aggregation.
-// A sum aggregation processes data and sums up the recordings.
-//
-// Most users won't directly access sum data.
-type SumData struct {
- Start time.Time
- Value float64
-}
-
-func (a *SumData) isAggregationData() bool { return true }
-
-func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
- a.Value += v
-}
-
-func (a *SumData) clone() AggregationData {
- return &SumData{Value: a.Value, Start: a.Start}
-}
-
-func (a *SumData) equal(other AggregationData) bool {
- a2, ok := other.(*SumData)
- if !ok {
- return false
- }
- return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon
-}
-
-func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeCumulativeInt64:
- return metricdata.NewInt64Point(t, int64(a.Value))
- case metricdata.TypeCumulativeFloat64:
- return metricdata.NewFloat64Point(t, a.Value)
- default:
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns the start time of the data being aggregated by SumData.
-func (a *SumData) StartTime() time.Time {
- return a.Start
-}
-
-// DistributionData is the aggregated data for the
-// Distribution aggregation.
-//
-// Most users won't directly access distribution data.
-//
-// For a distribution with N bounds, the associated DistributionData will have
-// N+1 buckets.
-type DistributionData struct {
- Count int64 // number of data points aggregated
- Min float64 // minimum value in the distribution
- Max float64 // max value in the distribution
- Mean float64 // mean of the distribution
- SumOfSquaredDev float64 // sum of the squared deviation from the mean
- CountPerBucket []int64 // number of occurrences per bucket
-	// ExemplarsPerBucket is a slice the same length as CountPerBucket containing
- // an exemplar for the associated bucket, or nil.
- ExemplarsPerBucket []*metricdata.Exemplar
- bounds []float64 // histogram distribution of the values
- Start time.Time
-}
-
-func newDistributionData(agg *Aggregation, t time.Time) *DistributionData {
- bucketCount := len(agg.Buckets) + 1
- return &DistributionData{
- CountPerBucket: make([]int64, bucketCount),
- ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
- bounds: agg.Buckets,
- Min: math.MaxFloat64,
- Max: math.SmallestNonzeroFloat64,
- Start: t,
- }
-}
-
-// Sum returns the sum of all samples collected.
-func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) }
-
-func (a *DistributionData) variance() float64 {
- if a.Count <= 1 {
- return 0
- }
- return a.SumOfSquaredDev / float64(a.Count-1)
-}
-
-func (a *DistributionData) isAggregationData() bool { return true }
-
-// TODO(songy23): support exemplar attachments.
-func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) {
- if v < a.Min {
- a.Min = v
- }
- if v > a.Max {
- a.Max = v
- }
- a.Count++
- a.addToBucket(v, attachments, t)
-
- if a.Count == 1 {
- a.Mean = v
- return
- }
-
- oldMean := a.Mean
- a.Mean = a.Mean + (v-a.Mean)/float64(a.Count)
- a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean)
-}
-
-func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) {
- var count *int64
- var i int
- var b float64
- for i, b = range a.bounds {
- if v < b {
- count = &a.CountPerBucket[i]
- break
- }
- }
- if count == nil { // Last bucket.
- i = len(a.bounds)
- count = &a.CountPerBucket[i]
- }
- *count++
- if exemplar := getExemplar(v, attachments, t); exemplar != nil {
- a.ExemplarsPerBucket[i] = exemplar
- }
-}
-
-func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar {
- if len(attachments) == 0 {
- return nil
- }
- return &metricdata.Exemplar{
- Value: v,
- Timestamp: t,
- Attachments: attachments,
- }
-}
-
-func (a *DistributionData) clone() AggregationData {
- c := *a
- c.CountPerBucket = append([]int64(nil), a.CountPerBucket...)
- c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...)
- return &c
-}
-
-func (a *DistributionData) equal(other AggregationData) bool {
- a2, ok := other.(*DistributionData)
- if !ok {
- return false
- }
- if a2 == nil {
- return false
- }
- if len(a.CountPerBucket) != len(a2.CountPerBucket) {
- return false
- }
- for i := range a.CountPerBucket {
- if a.CountPerBucket[i] != a2.CountPerBucket[i] {
- return false
- }
- }
- return a.Start.Equal(a2.Start) &&
- a.Count == a2.Count &&
- a.Min == a2.Min &&
- a.Max == a2.Max &&
- math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon
-}
-
-func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeCumulativeDistribution:
- buckets := []metricdata.Bucket{}
- for i := 0; i < len(a.CountPerBucket); i++ {
- buckets = append(buckets, metricdata.Bucket{
- Count: a.CountPerBucket[i],
- Exemplar: a.ExemplarsPerBucket[i],
- })
- }
- bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds}
-
- val := &metricdata.Distribution{
- Count: a.Count,
- Sum: a.Sum(),
- SumOfSquaredDeviation: a.SumOfSquaredDev,
- BucketOptions: bucketOptions,
- Buckets: buckets,
- }
- return metricdata.NewDistributionPoint(t, val)
-
- default:
- // TODO: [rghetia] when we have a use case for TypeGaugeDistribution.
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns the start time of the data being aggregated by DistributionData.
-func (a *DistributionData) StartTime() time.Time {
- return a.Start
-}
-
-// LastValueData returns the last value recorded for LastValue aggregation.
-type LastValueData struct {
- Value float64
-}
-
-func (l *LastValueData) isAggregationData() bool {
- return true
-}
-
-func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
- l.Value = v
-}
-
-func (l *LastValueData) clone() AggregationData {
- return &LastValueData{l.Value}
-}
-
-func (l *LastValueData) equal(other AggregationData) bool {
- a2, ok := other.(*LastValueData)
- if !ok {
- return false
- }
- return l.Value == a2.Value
-}
-
-func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
- switch metricType {
- case metricdata.TypeGaugeInt64:
- return metricdata.NewInt64Point(t, int64(l.Value))
- case metricdata.TypeGaugeFloat64:
- return metricdata.NewFloat64Point(t, l.Value)
- default:
- panic("unsupported metricdata.Type")
- }
-}
-
-// StartTime returns an empty time value as start time is not recorded when using last value
-// aggregation.
-func (l *LastValueData) StartTime() time.Time {
- return time.Time{}
-}
-
-// ClearStart clears the Start field from data if present. Useful for testing in cases where the
-// start time will be nondeterministic.
-func ClearStart(data AggregationData) {
- switch data := data.(type) {
- case *CountData:
- data.Start = time.Time{}
- case *SumData:
- data.Start = time.Time{}
- case *DistributionData:
- data.Start = time.Time{}
- }
-}
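
A hand check of the online (Welford-style) mean/deviation update in addSample above, using the samples 2 and 4:

    n=1: Mean = 2,               SumOfSquaredDev = 0
    n=2: Mean = 2 + (4-2)/2 = 3, SumOfSquaredDev = 0 + (4-2)*(4-3) = 2

variance() then returns 2/(2-1) = 2, matching the sample variance of {2, 4}.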
diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go
deleted file mode 100644
index bcd6e08c7..000000000
--- a/vendor/go.opencensus.io/stats/view/collector.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "sort"
- "time"
-
- "go.opencensus.io/internal/tagencoding"
- "go.opencensus.io/tag"
-)
-
-type collector struct {
- // signatures holds the aggregations values for each unique tag signature
- // (values for all keys) to its aggregator.
- signatures map[string]AggregationData
- // Aggregation is the description of the aggregation to perform for this
- // view.
- a *Aggregation
-}
-
-func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
- aggregator, ok := c.signatures[s]
- if !ok {
- aggregator = c.a.newData(t)
- c.signatures[s] = aggregator
- }
- aggregator.addSample(v, attachments, t)
-}
-
-// collectedRows returns a snapshot of the collected Row values.
-func (c *collector) collectedRows(keys []tag.Key) []*Row {
- rows := make([]*Row, 0, len(c.signatures))
- for sig, aggregator := range c.signatures {
- tags := decodeTags([]byte(sig), keys)
- row := &Row{Tags: tags, Data: aggregator.clone()}
- rows = append(rows, row)
- }
- return rows
-}
-
-func (c *collector) clearRows() {
- c.signatures = make(map[string]AggregationData)
-}
-
-// encodeWithKeys encodes the map by using values
-// only associated with the keys provided.
-func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
- // Compute the buffer length we will need ahead of time to avoid resizing later
- reqLen := 0
- for _, k := range keys {
- s, _ := m.Value(k)
- // We will store each key + its length
- reqLen += len(s) + 1
- }
- vb := &tagencoding.Values{
- Buffer: make([]byte, reqLen),
- }
- for _, k := range keys {
- v, _ := m.Value(k)
- vb.WriteValue([]byte(v))
- }
- return vb.Bytes()
-}
-
-// decodeTags decodes tags from the buffer and
-// orders them by the keys.
-func decodeTags(buf []byte, keys []tag.Key) []tag.Tag {
- vb := &tagencoding.Values{Buffer: buf}
- var tags []tag.Tag
- for _, k := range keys {
- v := vb.ReadValue()
- if v != nil {
- tags = append(tags, tag.Tag{Key: k, Value: string(v)})
- }
- }
- vb.ReadIndex = 0
- sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() })
- return tags
-}
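
A round-trip sketch for the unexported signature helpers above (hypothetical keys; imports elided; this runs inside this package):

    k1, k2 := tag.MustNewKey("a"), tag.MustNewKey("b")
    ctx, _ := tag.New(context.Background(), tag.Upsert(k1, "x"))
    m := tag.FromContext(ctx)
    sig := encodeWithKeys(m, []tag.Key{k1, k2}) // "x", then a zero-length value for k2
    tags := decodeTags(sig, []tag.Key{k1, k2})  // [{a x}]; k2 is dropped as absent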
diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go
deleted file mode 100644
index 60bf0e392..000000000
--- a/vendor/go.opencensus.io/stats/view/doc.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Package view contains support for collecting and exposing aggregates over stats.
-//
-// In order to collect measurements, views need to be defined and registered.
-// A view allows recorded measurements to be filtered and aggregated.
-//
-// All recorded measurements can be grouped by a list of tags.
-//
-// OpenCensus provides several aggregation methods: Count, Distribution, Sum, and LastValue.
-//
-// Count only counts the number of measurement points recorded.
-// Distribution provides statistical summary of the aggregated data by counting
-// how many recorded measurements fall into each bucket.
-// Sum adds up the measurement values.
-// LastValue just keeps track of the most recently recorded measurement value.
-// All aggregations are cumulative.
-//
-// Views can be registered and unregistered at any time during program execution.
-//
-// Libraries can define views but it is recommended that in most cases registering
-// views be left up to applications.
-//
-// # Exporting
-//
-// Collected and aggregated data can be exported to a metric collection
-// backend by registering its exporter.
-//
-// Multiple exporters can be registered to upload the data to various
-// different back ends.
-package view // import "go.opencensus.io/stats/view"
-
-// TODO(acetechnologist): Add a link to the language independent OpenCensus
-// spec when it is available.
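
A typical define-and-register sketch for this package; latencyMs and methodKey are assumed to exist, imports elided:

    v := &view.View{
        Name:        "example.org/latency",
        Description: "latency distribution",
        Measure:     latencyMs,                    // a stats.Measure
        TagKeys:     []tag.Key{methodKey},         // one Row per distinct method
        Aggregation: view.Distribution(5, 10, 25), // 3 bounds => 4 buckets
    }
    if err := view.Register(v); err != nil {
        log.Fatal(err)
    }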
diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go
deleted file mode 100644
index 73ba11f5b..000000000
--- a/vendor/go.opencensus.io/stats/view/export.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package view
-
-// Exporter exports the collected records as view data.
-//
-// The ExportView method should return quickly; if an
-// Exporter takes a significant amount of time to
-// process a Data, that work should be done on another goroutine.
-//
-// It is safe to assume that ExportView will not be called concurrently from
-// multiple goroutines.
-//
-// The Data should not be modified.
-type Exporter interface {
- ExportView(viewData *Data)
-}
-
-// RegisterExporter registers an exporter.
-// Collected data will be reported via all the
-// registered exporters. Once you no longer
-// want data to be exported, invoke UnregisterExporter
-// with the previously registered exporter.
-//
-// Binaries can register exporters, libraries shouldn't register exporters.
-func RegisterExporter(e Exporter) {
- defaultWorker.RegisterExporter(e)
-}
-
-// UnregisterExporter unregisters an exporter.
-func UnregisterExporter(e Exporter) {
- defaultWorker.UnregisterExporter(e)
-}
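
A sketch of a custom Exporter; per the contract above, ExportView should return quickly and must not mutate the Data (imports elided):

    type logExporter struct{}

    func (logExporter) ExportView(d *view.Data) {
        log.Printf("view %s: %d rows", d.View.Name, len(d.Rows))
    }

    // Typically registered once at startup by the application binary:
    // view.RegisterExporter(logExporter{})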
diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go
deleted file mode 100644
index 293b54ecb..000000000
--- a/vendor/go.opencensus.io/stats/view/view.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "bytes"
- "errors"
- "fmt"
- "reflect"
- "sort"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
-)
-
-// View allows users to aggregate the recorded stats.Measurements.
-// Views need to be passed to the Register function before data will be
-// collected and sent to Exporters.
-type View struct {
- Name string // Name of View. Must be unique. If unset, will default to the name of the Measure.
- Description string // Description is a human-readable description for this view.
-
- // TagKeys are the tag keys describing the grouping of this view.
- // A single Row will be produced for each combination of associated tag values.
- TagKeys []tag.Key
-
- // Measure is a stats.Measure to aggregate in this view.
- Measure stats.Measure
-
- // Aggregation is the aggregation function to apply to the set of Measurements.
- Aggregation *Aggregation
-}
-
-// WithName returns a copy of the View with a new name. This is useful for
-// renaming views to cope with limitations placed on metric names by various
-// backends.
-func (v *View) WithName(name string) *View {
- vNew := *v
- vNew.Name = name
- return &vNew
-}
-
-// same compares two views and returns true if they represent the same aggregation.
-func (v *View) same(other *View) bool {
- if v == other {
- return true
- }
- if v == nil {
- return false
- }
- return reflect.DeepEqual(v.Aggregation, other.Aggregation) &&
- v.Measure.Name() == other.Measure.Name()
-}
-
-// ErrNegativeBucketBounds is the error returned if a histogram contains negative bounds.
-//
-// Deprecated: this should not be public.
-var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported")
-
-// canonicalize canonicalizes v by setting explicit
-// defaults for Name and Description and sorting the TagKeys.
-func (v *View) canonicalize() error {
- if v.Measure == nil {
- return fmt.Errorf("cannot register view %q: measure not set", v.Name)
- }
- if v.Aggregation == nil {
- return fmt.Errorf("cannot register view %q: aggregation not set", v.Name)
- }
- if v.Name == "" {
- v.Name = v.Measure.Name()
- }
- if v.Description == "" {
- v.Description = v.Measure.Description()
- }
- if err := checkViewName(v.Name); err != nil {
- return err
- }
- sort.Slice(v.TagKeys, func(i, j int) bool {
- return v.TagKeys[i].Name() < v.TagKeys[j].Name()
- })
- sort.Float64s(v.Aggregation.Buckets)
- for _, b := range v.Aggregation.Buckets {
- if b < 0 {
- return ErrNegativeBucketBounds
- }
- }
- // drop 0 bucket silently.
- v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...)
-
- return nil
-}
-
-func dropZeroBounds(bounds ...float64) []float64 {
- for i, bound := range bounds {
- if bound > 0 {
- return bounds[i:]
- }
- }
- return []float64{}
-}
-
-// viewInternal is the internal representation of a View.
-type viewInternal struct {
- view *View // view is the canonicalized View definition associated with this view.
-	subscribed uint32 // 1 if someone is subscribed and data needs to be exported; use atomic to access
- collector *collector
- metricDescriptor *metricdata.Descriptor
-}
-
-func newViewInternal(v *View) (*viewInternal, error) {
- return &viewInternal{
- view: v,
- collector: &collector{make(map[string]AggregationData), v.Aggregation},
- metricDescriptor: viewToMetricDescriptor(v),
- }, nil
-}
-
-func (v *viewInternal) subscribe() {
- atomic.StoreUint32(&v.subscribed, 1)
-}
-
-func (v *viewInternal) unsubscribe() {
- atomic.StoreUint32(&v.subscribed, 0)
-}
-
-// isSubscribed returns true if the view is exporting
-// data by subscription.
-func (v *viewInternal) isSubscribed() bool {
- return atomic.LoadUint32(&v.subscribed) == 1
-}
-
-func (v *viewInternal) clearRows() {
- v.collector.clearRows()
-}
-
-func (v *viewInternal) collectedRows() []*Row {
- return v.collector.collectedRows(v.view.TagKeys)
-}
-
-func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) {
- if !v.isSubscribed() {
- return
- }
- sig := string(encodeWithKeys(m, v.view.TagKeys))
- v.collector.addSample(sig, val, attachments, t)
-}
-
-// A Data is a set of rows about usage of the single measure associated
-// with the given view. Each row is specific to a unique set of tags.
-type Data struct {
- View *View
- Start, End time.Time
- Rows []*Row
-}
-
-// Row is the collected value for a specific set of key value pairs a.k.a tags.
-type Row struct {
- Tags []tag.Tag
- Data AggregationData
-}
-
-func (r *Row) String() string {
- var buffer bytes.Buffer
- buffer.WriteString("{ ")
- buffer.WriteString("{ ")
- for _, t := range r.Tags {
- buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value))
- }
- buffer.WriteString(" }")
- buffer.WriteString(fmt.Sprintf("%v", r.Data))
- buffer.WriteString(" }")
- return buffer.String()
-}
-
-// Equal returns true if both rows are equal. Tags are expected to be ordered
-// by the key name. If both rows have the same tags but they appear in
-// different orders, Equal returns false.
-func (r *Row) Equal(other *Row) bool {
- if r == other {
- return true
- }
- return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data)
-}
-
-const maxNameLength = 255
-
-// Returns true if the given string contains only printable characters.
-func isPrintable(str string) bool {
- for _, r := range str {
- if !(r >= ' ' && r <= '~') {
- return false
- }
- }
- return true
-}
-
-func checkViewName(name string) error {
- if len(name) > maxNameLength {
- return fmt.Errorf("view name cannot be larger than %v", maxNameLength)
- }
- if !isPrintable(name) {
- return fmt.Errorf("view name needs to be an ASCII string")
- }
- return nil
-}
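
WithName in practice, continuing the earlier sketch's v: reuse one definition under a backend-safe name (for example, Prometheus rejects '/' and '.' in metric names):

    promView := v.WithName("example_org_latency")
    _ = view.Register(promView)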
diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go
deleted file mode 100644
index 57d615ec7..000000000
--- a/vendor/go.opencensus.io/stats/view/view_to_metric.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "time"
-
- "go.opencensus.io/resource"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/stats"
-)
-
-func getUnit(unit string) metricdata.Unit {
- switch unit {
- case "1":
- return metricdata.UnitDimensionless
- case "ms":
- return metricdata.UnitMilliseconds
- case "By":
- return metricdata.UnitBytes
- }
- return metricdata.UnitDimensionless
-}
-
-func getType(v *View) metricdata.Type {
- m := v.Measure
- agg := v.Aggregation
-
- switch agg.Type {
- case AggTypeSum:
- switch m.(type) {
- case *stats.Int64Measure:
- return metricdata.TypeCumulativeInt64
- case *stats.Float64Measure:
- return metricdata.TypeCumulativeFloat64
- default:
- panic("unexpected measure type")
- }
- case AggTypeDistribution:
- return metricdata.TypeCumulativeDistribution
- case AggTypeLastValue:
- switch m.(type) {
- case *stats.Int64Measure:
- return metricdata.TypeGaugeInt64
- case *stats.Float64Measure:
- return metricdata.TypeGaugeFloat64
- default:
- panic("unexpected measure type")
- }
- case AggTypeCount:
- switch m.(type) {
- case *stats.Int64Measure:
- return metricdata.TypeCumulativeInt64
- case *stats.Float64Measure:
- return metricdata.TypeCumulativeInt64
- default:
- panic("unexpected measure type")
- }
- default:
- panic("unexpected aggregation type")
- }
-}
-
-func getLabelKeys(v *View) []metricdata.LabelKey {
- labelKeys := []metricdata.LabelKey{}
- for _, k := range v.TagKeys {
- labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()})
- }
- return labelKeys
-}
-
-func viewToMetricDescriptor(v *View) *metricdata.Descriptor {
- return &metricdata.Descriptor{
- Name: v.Name,
- Description: v.Description,
- Unit: convertUnit(v),
- Type: getType(v),
- LabelKeys: getLabelKeys(v),
- }
-}
-
-func convertUnit(v *View) metricdata.Unit {
- switch v.Aggregation.Type {
- case AggTypeCount:
- return metricdata.UnitDimensionless
- default:
- return getUnit(v.Measure.Unit())
- }
-}
-
-func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue {
- labelValues := []metricdata.LabelValue{}
- tagMap := make(map[string]string)
- for _, tag := range row.Tags {
- tagMap[tag.Key.Name()] = tag.Value
- }
-
- for _, key := range expectedKeys {
- if val, ok := tagMap[key.Key]; ok {
- labelValues = append(labelValues, metricdata.NewLabelValue(val))
- } else {
- labelValues = append(labelValues, metricdata.LabelValue{})
- }
- }
- return labelValues
-}
-
-func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries {
- return &metricdata.TimeSeries{
- Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)},
- LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys),
- StartTime: row.Data.StartTime(),
- }
-}
-
-func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric {
- rows := v.collectedRows()
- if len(rows) == 0 {
- return nil
- }
-
- ts := []*metricdata.TimeSeries{}
- for _, row := range rows {
- ts = append(ts, rowToTimeseries(v, row, now))
- }
-
- m := &metricdata.Metric{
- Descriptor: *v.metricDescriptor,
- TimeSeries: ts,
- Resource: r,
- }
- return m
-}
diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go
deleted file mode 100644
index 6a79cd8a3..000000000
--- a/vendor/go.opencensus.io/stats/view/worker.go
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "fmt"
- "sync"
- "time"
-
- "go.opencensus.io/resource"
-
- "go.opencensus.io/metric/metricdata"
- "go.opencensus.io/metric/metricproducer"
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-func init() {
- defaultWorker = NewMeter().(*worker)
- go defaultWorker.start()
- internal.DefaultRecorder = record
- internal.MeasurementRecorder = recordMeasurement
-}
-
-type measureRef struct {
- measure string
- views map[*viewInternal]struct{}
-}
-
-type worker struct {
- measures map[string]*measureRef
- views map[string]*viewInternal
- viewStartTimes map[*viewInternal]time.Time
-
- timer *time.Ticker
- c chan command
- quit, done chan bool
- mu sync.RWMutex
- r *resource.Resource
-
- exportersMu sync.RWMutex
- exporters map[Exporter]struct{}
-}
-
-// Meter defines an interface which allows a single process to maintain
-// multiple sets of metrics exports (intended for the advanced case where a
-// single process wants to report metrics about multiple objects, such as
-// multiple databases or HTTP services).
-//
-// Note that this is an advanced use case, and the static functions in this
-// module should cover the common use cases.
-type Meter interface {
- stats.Recorder
- // Find returns a registered view associated with this name.
- // If no registered view is found, nil is returned.
- Find(name string) *View
- // Register begins collecting data for the given views.
- // Once a view is registered, it reports data to the registered exporters.
- Register(views ...*View) error
-	// Unregister the given views. Data will no longer be exported for these views
- // after Unregister returns.
- // It is not necessary to unregister from views you expect to collect for the
- // duration of your program execution.
- Unregister(views ...*View)
- // SetReportingPeriod sets the interval between reporting aggregated views in
- // the program. If duration is less than or equal to zero, it enables the
- // default behavior.
- //
- // Note: each exporter makes different promises about what the lowest supported
- // duration is. For example, the Stackdriver exporter recommends a value no
-	// lower than 1 minute. Consult the documentation of each exporter you use.
- SetReportingPeriod(time.Duration)
-
- // RegisterExporter registers an exporter.
- // Collected data will be reported via all the
- // registered exporters. Once you no longer
- // want data to be exported, invoke UnregisterExporter
- // with the previously registered exporter.
- //
- // Binaries can register exporters, libraries shouldn't register exporters.
- RegisterExporter(Exporter)
- // UnregisterExporter unregisters an exporter.
- UnregisterExporter(Exporter)
- // SetResource may be used to set the Resource associated with this registry.
- // This is intended to be used in cases where a single process exports metrics
- // for multiple Resources, typically in a multi-tenant situation.
- SetResource(*resource.Resource)
-
- // Start causes the Meter to start processing Record calls and aggregating
- // statistics as well as exporting data.
- Start()
- // Stop causes the Meter to stop processing calls and terminate data export.
- Stop()
-
-	// RetrieveData gets a snapshot of the data collected for the view registered
- // with the given name. It is intended for testing only.
- RetrieveData(viewName string) ([]*Row, error)
-}
-
-var _ Meter = (*worker)(nil)
-
-var defaultWorker *worker
-
-var defaultReportingDuration = 10 * time.Second
-
-// Find returns a registered view associated with this name.
-// If no registered view is found, nil is returned.
-func Find(name string) (v *View) {
- return defaultWorker.Find(name)
-}
-
-// Find returns a registered view associated with this name.
-// If no registered view is found, nil is returned.
-func (w *worker) Find(name string) (v *View) {
- req := &getViewByNameReq{
- name: name,
- c: make(chan *getViewByNameResp),
- }
- w.c <- req
- resp := <-req.c
- return resp.v
-}
-
-// Register begins collecting data for the given views.
-// Once a view is registered, it reports data to the registered exporters.
-func Register(views ...*View) error {
- return defaultWorker.Register(views...)
-}
-
-// Register begins collecting data for the given views.
-// Once a view is registered, it reports data to the registered exporters.
-func (w *worker) Register(views ...*View) error {
-	req := &registerViewReq{
- views: views,
- err: make(chan error),
- }
- w.c <- req
- return <-req.err
-}
-
-// Unregister the given views. Data will no longer be exported for these views
-// after Unregister returns.
-// It is not necessary to unregister from views you expect to collect for the
-// duration of your program execution.
-func Unregister(views ...*View) {
- defaultWorker.Unregister(views...)
-}
-
-// Unregister the given views. Data will no longer be exported for these views
-// after Unregister returns.
-// It is not necessary to unregister from views you expect to collect for the
-// duration of your program execution.
-func (w *worker) Unregister(views ...*View) {
- names := make([]string, len(views))
- for i := range views {
- names[i] = views[i].Name
- }
- req := &unregisterFromViewReq{
- views: names,
- done: make(chan struct{}),
- }
- w.c <- req
- <-req.done
-}
-
-// RetrieveData gets a snapshot of the data collected for the view registered
-// with the given name. It is intended for testing only.
-func RetrieveData(viewName string) ([]*Row, error) {
- return defaultWorker.RetrieveData(viewName)
-}
-
-// RetrieveData gets a snapshot of the data collected for the view registered
-// with the given name. It is intended for testing only.
-func (w *worker) RetrieveData(viewName string) ([]*Row, error) {
- req := &retrieveDataReq{
- now: time.Now(),
- v: viewName,
- c: make(chan *retrieveDataResp),
- }
- w.c <- req
- resp := <-req.c
- return resp.rows, resp.err
-}
-
-func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
- defaultWorker.Record(tags, ms, attachments)
-}
-
-func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
- defaultWorker.recordMeasurement(tags, ms, attachments)
-}
-
-// Record records a set of measurements ms associated with the given tags and attachments.
-func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
- w.recordMeasurement(tags, ms.([]stats.Measurement), attachments)
-}
-
-// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
-// This is the same as Record but without an interface{} type to avoid allocations
-func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
- req := &recordReq{
- tm: tags,
- ms: ms,
- attachments: attachments,
- t: time.Now(),
- }
- w.c <- req
-}
-
-// SetReportingPeriod sets the interval between reporting aggregated views in
-// the program. If duration is less than or equal to zero, it enables the
-// default behavior.
-//
-// Note: each exporter makes different promises about what the lowest supported
-// duration is. For example, the Stackdriver exporter recommends a value no
-// lower than 1 minute. Consult the documentation of each exporter you use.
-func SetReportingPeriod(d time.Duration) {
- defaultWorker.SetReportingPeriod(d)
-}
-
-// Stop stops the default worker.
-func Stop() {
- defaultWorker.Stop()
-}
-
-// SetReportingPeriod sets the interval between reporting aggregated views in
-// the program. If duration is less than or equal to zero, it enables the
-// default behavior.
-//
-// Note: each exporter makes different promises about what the lowest supported
-// duration is. For example, the Stackdriver exporter recommends a value no
-// lower than 1 minute. Consult the documentation of each exporter you use.
-func (w *worker) SetReportingPeriod(d time.Duration) {
- // TODO(acetechnologist): ensure that the duration d is more than a certain
- // value. e.g. 1s
- req := &setReportingPeriodReq{
- d: d,
- c: make(chan bool),
- }
- w.c <- req
- <-req.c // don't return until the timer is set to the new duration.
-}
-
-// NewMeter constructs a Meter instance. You should only need to use this if
-// you need to separate out Measurement recordings and View aggregations within
-// a single process.
-func NewMeter() Meter {
- return &worker{
- measures: make(map[string]*measureRef),
- views: make(map[string]*viewInternal),
- viewStartTimes: make(map[*viewInternal]time.Time),
- timer: time.NewTicker(defaultReportingDuration),
- c: make(chan command, 1024),
- quit: make(chan bool),
- done: make(chan bool),
-
- exporters: make(map[Exporter]struct{}),
- }
-}
-
-// SetResource associates all data collected by this Meter with the specified
-// resource. This resource is reported when using metricexport.ReadAndExport;
-// it is not provided when used with ExportView/RegisterExporter, because that
-// interface does not provide a means for reporting the Resource.
-func (w *worker) SetResource(r *resource.Resource) {
- w.r = r
-}
-
-func (w *worker) Start() {
- go w.start()
-}
-
-func (w *worker) start() {
- prodMgr := metricproducer.GlobalManager()
- prodMgr.AddProducer(w)
-
- for {
- select {
- case cmd := <-w.c:
- cmd.handleCommand(w)
- case <-w.timer.C:
- w.reportUsage()
- case <-w.quit:
- w.timer.Stop()
- close(w.c)
- close(w.done)
- return
- }
- }
-}
-
-func (w *worker) Stop() {
- prodMgr := metricproducer.GlobalManager()
- prodMgr.DeleteProducer(w)
- select {
- case <-w.quit:
- default:
- close(w.quit)
- }
- <-w.done
-}
-
-func (w *worker) getMeasureRef(name string) *measureRef {
- if mr, ok := w.measures[name]; ok {
- return mr
- }
- mr := &measureRef{
- measure: name,
- views: make(map[*viewInternal]struct{}),
- }
- w.measures[name] = mr
- return mr
-}
-
-func (w *worker) tryRegisterView(v *View) (*viewInternal, error) {
- w.mu.Lock()
- defer w.mu.Unlock()
- vi, err := newViewInternal(v)
- if err != nil {
- return nil, err
- }
- if x, ok := w.views[vi.view.Name]; ok {
- if !x.view.same(vi.view) {
- return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name)
- }
-
- // the view is already registered so there is nothing to do and the
- // command is considered successful.
- return x, nil
- }
- w.views[vi.view.Name] = vi
- w.viewStartTimes[vi] = time.Now()
- ref := w.getMeasureRef(vi.view.Measure.Name())
- ref.views[vi] = struct{}{}
- return vi, nil
-}
-
-func (w *worker) unregisterView(v *viewInternal) {
- w.mu.Lock()
- defer w.mu.Unlock()
- delete(w.views, v.view.Name)
- delete(w.viewStartTimes, v)
- if measure := w.measures[v.view.Measure.Name()]; measure != nil {
- delete(measure.views, v)
- }
-}
-
-func (w *worker) reportView(v *viewInternal) {
- if !v.isSubscribed() {
- return
- }
- rows := v.collectedRows()
- viewData := &Data{
- View: v.view,
- Start: w.viewStartTimes[v],
- End: time.Now(),
- Rows: rows,
- }
- w.exportersMu.Lock()
- defer w.exportersMu.Unlock()
- for e := range w.exporters {
- e.ExportView(viewData)
- }
-}
-
-func (w *worker) reportUsage() {
- w.mu.Lock()
- defer w.mu.Unlock()
- for _, v := range w.views {
- w.reportView(v)
- }
-}
-
-func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric {
- if !v.isSubscribed() {
- return nil
- }
-
- return viewToMetric(v, w.r, now)
-}
-
-// Read reads all view data and returns them as metrics.
-// It is typically invoked by metric reader to export stats in metric format.
-func (w *worker) Read() []*metricdata.Metric {
- w.mu.Lock()
- defer w.mu.Unlock()
- now := time.Now()
- metrics := make([]*metricdata.Metric, 0, len(w.views))
- for _, v := range w.views {
- metric := w.toMetric(v, now)
- if metric != nil {
- metrics = append(metrics, metric)
- }
- }
- return metrics
-}
-
-func (w *worker) RegisterExporter(e Exporter) {
- w.exportersMu.Lock()
- defer w.exportersMu.Unlock()
-
- w.exporters[e] = struct{}{}
-}
-
-func (w *worker) UnregisterExporter(e Exporter) {
- w.exportersMu.Lock()
- defer w.exportersMu.Unlock()
-
- delete(w.exporters, e)
-}
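
A usage sketch for the advanced Meter path above, with a worker separate from the package-level default (v and latencyMs as in the earlier sketches):

    m := view.NewMeter()
    m.Start()
    defer m.Stop()
    if err := m.Register(v); err != nil {
        log.Fatal(err)
    }
    // The measurement argument is a []stats.Measurement; a nil tag map is accepted.
    m.Record(nil, []stats.Measurement{latencyMs.M(7)}, nil)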
diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go
deleted file mode 100644
index 9ac4cc059..000000000
--- a/vendor/go.opencensus.io/stats/view/worker_commands.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
- "errors"
- "fmt"
- "strings"
- "time"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/internal"
- "go.opencensus.io/tag"
-)
-
-type command interface {
- handleCommand(w *worker)
-}
-
-// getViewByNameReq is the command to get a view given its name.
-type getViewByNameReq struct {
- name string
- c chan *getViewByNameResp
-}
-
-type getViewByNameResp struct {
- v *View
-}
-
-func (cmd *getViewByNameReq) handleCommand(w *worker) {
- v := w.views[cmd.name]
- if v == nil {
- cmd.c <- &getViewByNameResp{nil}
- return
- }
- cmd.c <- &getViewByNameResp{v.view}
-}
-
-// registerViewReq is the command to register a view.
-type registerViewReq struct {
- views []*View
- err chan error
-}
-
-func (cmd *registerViewReq) handleCommand(w *worker) {
- for _, v := range cmd.views {
- if err := v.canonicalize(); err != nil {
- cmd.err <- err
- return
- }
- }
- var errstr []string
- for _, view := range cmd.views {
- vi, err := w.tryRegisterView(view)
- if err != nil {
- errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
- continue
- }
- internal.SubscriptionReporter(view.Measure.Name())
- vi.subscribe()
- }
- if len(errstr) > 0 {
- cmd.err <- errors.New(strings.Join(errstr, "\n"))
- } else {
- cmd.err <- nil
- }
-}
-
-// unregisterFromViewReq is the command to unregister from a view. It has no
-// impact on the data collection for clients that are pulling data from the
-// library.
-type unregisterFromViewReq struct {
- views []string
- done chan struct{}
-}
-
-func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
- for _, name := range cmd.views {
- vi, ok := w.views[name]
- if !ok {
- continue
- }
-
- // Report pending data for this view before removing it.
- w.reportView(vi)
-
- vi.unsubscribe()
- if !vi.isSubscribed() {
- // this was the last subscription and view is not collecting anymore.
- // The collected data can be cleared.
- vi.clearRows()
- }
- w.unregisterView(vi)
- }
- cmd.done <- struct{}{}
-}
-
-// retrieveDataReq is the command to retrieve data for a view.
-type retrieveDataReq struct {
- now time.Time
- v string
- c chan *retrieveDataResp
-}
-
-type retrieveDataResp struct {
- rows []*Row
- err error
-}
-
-func (cmd *retrieveDataReq) handleCommand(w *worker) {
- w.mu.Lock()
- defer w.mu.Unlock()
- vi, ok := w.views[cmd.v]
- if !ok {
- cmd.c <- &retrieveDataResp{
- nil,
- fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
- }
- return
- }
-
- if !vi.isSubscribed() {
- cmd.c <- &retrieveDataResp{
- nil,
- fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
- }
- return
- }
- cmd.c <- &retrieveDataResp{
- vi.collectedRows(),
- nil,
- }
-}
-
-// recordReq is the command to record data related to multiple measures
-// at once.
-type recordReq struct {
- tm *tag.Map
- ms []stats.Measurement
- attachments map[string]interface{}
- t time.Time
-}
-
-func (cmd *recordReq) handleCommand(w *worker) {
- w.mu.Lock()
- defer w.mu.Unlock()
- for _, m := range cmd.ms {
- if (m == stats.Measurement{}) { // not registered
- continue
- }
- ref := w.getMeasureRef(m.Measure().Name())
- for v := range ref.views {
- v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t)
- }
- }
-}
-
-// setReportingPeriodReq is the command to modify the duration between
-// reporting the collected data to the registered clients.
-type setReportingPeriodReq struct {
- d time.Duration
- c chan bool
-}
-
-func (cmd *setReportingPeriodReq) handleCommand(w *worker) {
- w.timer.Stop()
- if cmd.d <= 0 {
- w.timer = time.NewTicker(defaultReportingDuration)
- } else {
- w.timer = time.NewTicker(cmd.d)
- }
- cmd.c <- true
-}
diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go
deleted file mode 100644
index b27d1b26b..000000000
--- a/vendor/go.opencensus.io/tag/context.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "context"
-)
-
-// FromContext returns the tag map stored in the context.
-func FromContext(ctx context.Context) *Map {
- // The returned tag map shouldn't be mutated.
- ts := ctx.Value(mapCtxKey)
- if ts == nil {
- return nil
- }
- return ts.(*Map)
-}
-
-// NewContext creates a new context with the given tag map.
-// To propagate a tag map to downstream methods and downstream RPCs, add a tag map
-// to the current context. NewContext will return a copy of the current context,
-// and put the tag map into the returned one.
-// If there is already a tag map in the current context, it will be replaced with m.
-func NewContext(ctx context.Context, m *Map) context.Context {
- return context.WithValue(ctx, mapCtxKey, m)
-}
-
-type ctxKey struct{}
-
-var mapCtxKey = ctxKey{}
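
A round-trip sketch for the helpers above; m is an assumed *Map, e.g. built via tag.New:

    ctx := tag.NewContext(context.Background(), m)
    if tm := tag.FromContext(ctx); tm != nil {
        // tm == m; treat it as read-only, as noted above.
    }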
diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go
deleted file mode 100644
index da16b74e4..000000000
--- a/vendor/go.opencensus.io/tag/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-/*
-Package tag contains OpenCensus tags.
-
-Tags are key-value pairs. Tags provide additional cardinality to
-the OpenCensus instrumentation data.
-
-Tags can be propagated on the wire and in the same
-process via context.Context. Encode and Decode should be
-used to convert tags to and from their binary propagation form.
-*/
-package tag // import "go.opencensus.io/tag"
diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go
deleted file mode 100644
index 71ec91365..000000000
--- a/vendor/go.opencensus.io/tag/key.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-// Key represents a tag key.
-type Key struct {
- name string
-}
-
-// NewKey creates or retrieves a string key identified by name.
-// Calling NewKey more than once with the same name returns the same key.
-func NewKey(name string) (Key, error) {
- if !checkKeyName(name) {
- return Key{}, errInvalidKeyName
- }
- return Key{name: name}, nil
-}
-
-// MustNewKey returns a key with the given name, and panics if name is an invalid key name.
-func MustNewKey(name string) Key {
- k, err := NewKey(name)
- if err != nil {
- panic(err)
- }
- return k
-}
-
-// Name returns the name of the key.
-func (k Key) Name() string {
- return k.name
-}
diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go
deleted file mode 100644
index 0272ef85a..000000000
--- a/vendor/go.opencensus.io/tag/map.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "bytes"
- "context"
- "fmt"
- "sort"
-)
-
-// Tag is a key-value pair that can be propagated on the wire.
-type Tag struct {
- Key Key
- Value string
-}
-
-type tagContent struct {
- value string
- m metadatas
-}
-
-// Map is a map of tags. Use New to create a context containing
-// a new Map.
-type Map struct {
- m map[Key]tagContent
-}
-
-// Value returns the value for the key if a value for the key exists.
-func (m *Map) Value(k Key) (string, bool) {
- if m == nil {
- return "", false
- }
- v, ok := m.m[k]
- return v.value, ok
-}
-
-func (m *Map) String() string {
- if m == nil {
- return "nil"
- }
- keys := make([]Key, 0, len(m.m))
- for k := range m.m {
- keys = append(keys, k)
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
-
- var buffer bytes.Buffer
- buffer.WriteString("{ ")
- for _, k := range keys {
- buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
- }
- buffer.WriteString(" }")
- return buffer.String()
-}
-
-func (m *Map) insert(k Key, v string, md metadatas) {
- if _, ok := m.m[k]; ok {
- return
- }
- m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) update(k Key, v string, md metadatas) {
- if _, ok := m.m[k]; ok {
- m.m[k] = tagContent{value: v, m: md}
- }
-}
-
-func (m *Map) upsert(k Key, v string, md metadatas) {
- m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) delete(k Key) {
- delete(m.m, k)
-}
-
-func newMap() *Map {
- return &Map{m: make(map[Key]tagContent)}
-}
-
-// Mutator modifies a tag map.
-type Mutator interface {
- Mutate(t *Map) (*Map, error)
-}
-
-// Insert returns a mutator that inserts a
-// value associated with k. If k already exists in the tag map,
-// mutator doesn't update the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Insert(k Key, v string, mds ...Metadata) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.insert(k, v, createMetadatas(mds...))
- return m, nil
- },
- }
-}
-
-// Update returns a mutator that updates the
-// value of the tag associated with k with v. If k doesn't
-// exists in the tag map, the mutator doesn't insert the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Update(k Key, v string, mds ...Metadata) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.update(k, v, createMetadatas(mds...))
- return m, nil
- },
- }
-}
-
-// Upsert returns a mutator that upserts the
-// value of the tag associated with k with v. It inserts the
-// value if k doesn't already exist, and mutates the value
-// if k already exists.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute, then
-// the update from the last metadata prevails.
-func Upsert(k Key, v string, mds ...Metadata) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- if !checkValue(v) {
- return nil, errInvalidValue
- }
- m.upsert(k, v, createMetadatas(mds...))
- return m, nil
- },
- }
-}
-
-func createMetadatas(mds ...Metadata) metadatas {
- var metas metadatas
- if len(mds) > 0 {
- for _, md := range mds {
- if md != nil {
- md(&metas)
- }
- }
- } else {
- WithTTL(TTLUnlimitedPropagation)(&metas)
- }
-	return metas
-}
-
-// Delete returns a mutator that deletes
-// the value associated with k.
-func Delete(k Key) Mutator {
- return &mutator{
- fn: func(m *Map) (*Map, error) {
- m.delete(k)
- return m, nil
- },
- }
-}
-
-// New returns a new context that contains a tag map
-// originated from the incoming context and modified
-// with the provided mutators.
-func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
- m := newMap()
- orig := FromContext(ctx)
- if orig != nil {
- for k, v := range orig.m {
-			if !checkKeyName(k.Name()) {
-				return ctx, fmt.Errorf("key:%q: %v", k.Name(), errInvalidKeyName)
-			}
-			if !checkValue(v.value) {
-				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v.value, errInvalidValue)
-			}
- m.insert(k, v.value, v.m)
- }
- }
- var err error
- for _, mod := range mutator {
- m, err = mod.Mutate(m)
- if err != nil {
- return ctx, err
- }
- }
- return NewContext(ctx, m), nil
-}
-
-// Do is similar to pprof.Do: a convenience for installing the tags
-// from the context as Go profiler labels. This allows you to
-// correlate runtime profiling with stats.
-//
-// It converts the key/values from the given map to Go profiler labels
-// and calls pprof.Do.
-//
-// Do does nothing if your Go version is below 1.9.
-func Do(ctx context.Context, f func(ctx context.Context)) {
- do(ctx, f)
-}
-
-type mutator struct {
- fn func(t *Map) (*Map, error)
-}
-
-func (m *mutator) Mutate(t *Map) (*Map, error) {
- return m.fn(t)
-}
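For review context, this removal drops the tag-map mutator API from the vendored copy. A minimal sketch of how downstream callers typically used it, assuming the package's exported surface (tag.MustNewKey, tag.FromContext); the key name is illustrative:

	package main

	import (
		"context"
		"fmt"

		"go.opencensus.io/tag"
	)

	func main() {
		method := tag.MustNewKey("method") // illustrative key name

		ctx, err := tag.New(context.Background(),
			tag.Insert(method, "GET"),  // no-op once the key is set
			tag.Upsert(method, "POST"), // insert-or-overwrite always wins
		)
		if err != nil {
			panic(err)
		}

		v, ok := tag.FromContext(ctx).Value(method)
		fmt.Println(v, ok) // "POST true"
	}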
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
deleted file mode 100644
index c242e695c..000000000
--- a/vendor/go.opencensus.io/tag/map_codec.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
- "encoding/binary"
- "fmt"
-)
-
-// keyType defines the types of keys allowed. Currently only keyTypeString is
-// supported.
-type keyType byte
-
-const (
- keyTypeString keyType = iota
- keyTypeInt64
- keyTypeTrue
- keyTypeFalse
-
- tagsVersionID = byte(0)
-)
-
-type encoderGRPC struct {
- buf []byte
- writeIdx, readIdx int
-}
-
-// writeTagString writes the keyTypeString byte followed by the key string and
-// value string.
-func (eg *encoderGRPC) writeTagString(k, v string) {
- eg.writeByte(byte(keyTypeString))
- eg.writeStringWithVarintLen(k)
- eg.writeStringWithVarintLen(v)
-}
-
-func (eg *encoderGRPC) writeTagUint64(k string, i uint64) {
- eg.writeByte(byte(keyTypeInt64))
- eg.writeStringWithVarintLen(k)
- eg.writeUint64(i)
-}
-
-func (eg *encoderGRPC) writeTagTrue(k string) {
- eg.writeByte(byte(keyTypeTrue))
- eg.writeStringWithVarintLen(k)
-}
-
-func (eg *encoderGRPC) writeTagFalse(k string) {
- eg.writeByte(byte(keyTypeFalse))
- eg.writeStringWithVarintLen(k)
-}
-
-func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) {
- length := len(bytes)
-
- eg.growIfRequired(binary.MaxVarintLen64 + length)
- eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
- copy(eg.buf[eg.writeIdx:], bytes)
- eg.writeIdx += length
-}
-
-func (eg *encoderGRPC) writeStringWithVarintLen(s string) {
- length := len(s)
-
- eg.growIfRequired(binary.MaxVarintLen64 + length)
- eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length))
- copy(eg.buf[eg.writeIdx:], s)
- eg.writeIdx += length
-}
-
-func (eg *encoderGRPC) writeByte(v byte) {
- eg.growIfRequired(1)
- eg.buf[eg.writeIdx] = v
- eg.writeIdx++
-}
-
-func (eg *encoderGRPC) writeUint32(i uint32) {
- eg.growIfRequired(4)
- binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i)
- eg.writeIdx += 4
-}
-
-func (eg *encoderGRPC) writeUint64(i uint64) {
- eg.growIfRequired(8)
- binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i)
- eg.writeIdx += 8
-}
-
-func (eg *encoderGRPC) readByte() byte {
- b := eg.buf[eg.readIdx]
- eg.readIdx++
- return b
-}
-
-func (eg *encoderGRPC) readUint32() uint32 {
- i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:])
- eg.readIdx += 4
- return i
-}
-
-func (eg *encoderGRPC) readUint64() uint64 {
- i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:])
- eg.readIdx += 8
- return i
-}
-
-func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) {
- if eg.readEnded() {
- return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
- }
- length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:])
- if valueStart <= 0 {
- return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx)
- }
-
- valueStart += eg.readIdx
- valueEnd := valueStart + int(length)
- if valueEnd > len(eg.buf) {
- return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf))
- }
-
- eg.readIdx = valueEnd
- return eg.buf[valueStart:valueEnd], nil
-}
-
-func (eg *encoderGRPC) readStringWithVarintLen() (string, error) {
- bytes, err := eg.readBytesWithVarintLen()
- if err != nil {
- return "", err
- }
- return string(bytes), nil
-}
-
-func (eg *encoderGRPC) growIfRequired(expected int) {
- if len(eg.buf)-eg.writeIdx < expected {
- tmp := make([]byte, 2*(len(eg.buf)+1)+expected)
- copy(tmp, eg.buf)
- eg.buf = tmp
- }
-}
-
-func (eg *encoderGRPC) readEnded() bool {
- return eg.readIdx >= len(eg.buf)
-}
-
-func (eg *encoderGRPC) bytes() []byte {
- return eg.buf[:eg.writeIdx]
-}
-
-// Encode encodes the tag map into a []byte. It is useful for propagating
-// tag maps on the wire in binary format.
-func Encode(m *Map) []byte {
- if m == nil {
- return nil
- }
- eg := &encoderGRPC{
- buf: make([]byte, len(m.m)),
- }
- eg.writeByte(tagsVersionID)
- for k, v := range m.m {
- if v.m.ttl.ttl == valueTTLUnlimitedPropagation {
- eg.writeByte(byte(keyTypeString))
- eg.writeStringWithVarintLen(k.name)
- eg.writeBytesWithVarintLen([]byte(v.value))
- }
- }
- return eg.bytes()
-}
-
-// Decode decodes the given []byte into a tag map.
-func Decode(bytes []byte) (*Map, error) {
- ts := newMap()
- err := DecodeEach(bytes, ts.upsert)
- if err != nil {
- // no partial failures
- return nil, err
- }
- return ts, nil
-}
-
-// DecodeEach decodes the given serialized tag map, calling handler for each
-// tag key and value decoded.
-func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error {
- eg := &encoderGRPC{
- buf: bytes,
- }
- if len(eg.buf) == 0 {
- return nil
- }
-
- version := eg.readByte()
- if version > tagsVersionID {
- return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
- }
-
- for !eg.readEnded() {
- typ := keyType(eg.readByte())
-
- if typ != keyTypeString {
- return fmt.Errorf("cannot decode: invalid key type: %q", typ)
- }
-
- k, err := eg.readBytesWithVarintLen()
- if err != nil {
- return err
- }
-
- v, err := eg.readBytesWithVarintLen()
- if err != nil {
- return err
- }
-
- key, err := NewKey(string(k))
- if err != nil {
- return err
- }
- val := string(v)
- if !checkValue(val) {
- return errInvalidValue
- }
-		fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation)))
- }
- return nil
-}
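A round-trip sketch of the binary codec above, under the same assumptions as the previous example:

	package main

	import (
		"context"
		"fmt"

		"go.opencensus.io/tag"
	)

	func main() {
		k := tag.MustNewKey("method") // illustrative
		ctx, _ := tag.New(context.Background(), tag.Upsert(k, "GET"))

		// wire is a version byte followed by (type, key, value) entries.
		wire := tag.Encode(tag.FromContext(ctx))

		decoded, err := tag.Decode(wire)
		if err != nil {
			panic(err) // Decode returns no partial results
		}
		v, _ := decoded.Value(k)
		fmt.Println(v) // "GET"
	}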
diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go
deleted file mode 100644
index 6571a583e..000000000
--- a/vendor/go.opencensus.io/tag/metadata.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-const (
- // valueTTLNoPropagation prevents tag from propagating.
- valueTTLNoPropagation = 0
-
- // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops.
- valueTTLUnlimitedPropagation = -1
-)
-
-// TTL is metadata that specifies the number of hops a tag can propagate.
-// Details about TTL metadata are specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata
-type TTL struct {
- ttl int
-}
-
-var (
- // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops.
- TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation}
-
- // TTLNoPropagation is TTL metadata that prevents tag from propagating.
- TTLNoPropagation = TTL{ttl: valueTTLNoPropagation}
-)
-
-type metadatas struct {
- ttl TTL
-}
-
-// Metadata applies metadatas specified by the function.
-type Metadata func(*metadatas)
-
-// WithTTL applies metadata with provided ttl.
-func WithTTL(ttl TTL) Metadata {
- return func(m *metadatas) {
- m.ttl = ttl
- }
-}
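TTL metadata interacts with Encode above: only tags carrying TTLUnlimitedPropagation are serialized. A sketch of keeping a tag in-process (key name illustrative):

	package main

	import (
		"context"
		"fmt"

		"go.opencensus.io/tag"
	)

	func main() {
		k := tag.MustNewKey("request_id")

		// With TTLNoPropagation the tag is visible in-process but Encode
		// skips it, so it never crosses the wire.
		ctx, _ := tag.New(context.Background(),
			tag.Upsert(k, "abc123", tag.WithTTL(tag.TTLNoPropagation)),
		)
		fmt.Println(len(tag.Encode(tag.FromContext(ctx)))) // 1: just the version byte
	}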
diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go
deleted file mode 100644
index 8fb17226f..000000000
--- a/vendor/go.opencensus.io/tag/profile_19.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package tag
-
-import (
- "context"
- "runtime/pprof"
-)
-
-func do(ctx context.Context, f func(ctx context.Context)) {
- m := FromContext(ctx)
- keyvals := make([]string, 0, 2*len(m.m))
- for k, v := range m.m {
- keyvals = append(keyvals, k.Name(), v.value)
- }
- pprof.Do(ctx, pprof.Labels(keyvals...), f)
-}
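A sketch of how tag.Do attaches tags as Go profiler labels; doWork is a hypothetical stand-in for CPU-heavy code:

	package main

	import (
		"context"

		"go.opencensus.io/tag"
	)

	func doWork(ctx context.Context) {
		// CPU samples taken here carry op=checkout as a pprof label.
	}

	func main() {
		op := tag.MustNewKey("op") // illustrative
		ctx, _ := tag.New(context.Background(), tag.Upsert(op, "checkout"))
		tag.Do(ctx, doWork)
	}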
diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go
deleted file mode 100644
index 0939fc674..000000000
--- a/vendor/go.opencensus.io/tag/validate.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tag
-
-import "errors"
-
-const (
- maxKeyLength = 255
-
-	// valid key names and values are restricted to the US-ASCII subset
-	// (range 0x20 (' ') to 0x7e ('~')).
- validKeyValueMin = 32
- validKeyValueMax = 126
-)
-
-var (
-	errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length is 255 characters")
-	errInvalidValue   = errors.New("invalid value: only ASCII characters accepted; max length is 255 characters")
-)
-
-func checkKeyName(name string) bool {
- if len(name) == 0 {
- return false
- }
- if len(name) > maxKeyLength {
- return false
- }
- return isASCII(name)
-}
-
-func isASCII(s string) bool {
- for _, c := range s {
- if (c < validKeyValueMin) || (c > validKeyValueMax) {
- return false
- }
- }
- return true
-}
-
-func checkValue(v string) bool {
- if len(v) > maxKeyLength {
- return false
- }
- return isASCII(v)
-}
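The checks above reject anything outside printable ASCII, which surfaces as an error from the mutators. For instance (sketch):

	package main

	import (
		"context"
		"fmt"

		"go.opencensus.io/tag"
	)

	func main() {
		k := tag.MustNewKey("user") // key names follow the same ASCII rule

		// 'ï' (U+00EF) is outside the 0x20-0x7e range, so the mutator fails.
		_, err := tag.New(context.Background(), tag.Upsert(k, "naïve"))
		fmt.Println(err) // invalid value: only ASCII characters accepted; ...
	}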
diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go
deleted file mode 100644
index c8e26ed63..000000000
--- a/vendor/go.opencensus.io/trace/basetypes.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "fmt"
- "time"
-)
-
-type (
- // TraceID is a 16-byte identifier for a set of spans.
- TraceID [16]byte
-
- // SpanID is an 8-byte identifier for a single span.
- SpanID [8]byte
-)
-
-func (t TraceID) String() string {
- return fmt.Sprintf("%02x", t[:])
-}
-
-func (s SpanID) String() string {
- return fmt.Sprintf("%02x", s[:])
-}
-
-// Annotation represents a text annotation with a set of attributes and a timestamp.
-type Annotation struct {
- Time time.Time
- Message string
- Attributes map[string]interface{}
-}
-
-// Attribute represents a key-value pair on a span, link or annotation.
-// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute.
-type Attribute struct {
- key string
- value interface{}
-}
-
-// Key returns the attribute's key
-func (a *Attribute) Key() string {
- return a.key
-}
-
-// Value returns the attribute's value
-func (a *Attribute) Value() interface{} {
- return a.value
-}
-
-// BoolAttribute returns a bool-valued attribute.
-func BoolAttribute(key string, value bool) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// Int64Attribute returns an int64-valued attribute.
-func Int64Attribute(key string, value int64) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// Float64Attribute returns a float64-valued attribute.
-func Float64Attribute(key string, value float64) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// StringAttribute returns a string-valued attribute.
-func StringAttribute(key string, value string) Attribute {
- return Attribute{key: key, value: value}
-}
-
-// LinkType specifies the relationship between the span that had the link
-// added, and the linked span.
-type LinkType int32
-
-// LinkType values.
-const (
- LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown.
- LinkTypeChild // The linked span is a child of the current span.
- LinkTypeParent // The linked span is the parent of the current span.
-)
-
-// Link represents a reference from one span to another span.
-type Link struct {
- TraceID TraceID
- SpanID SpanID
- Type LinkType
- // Attributes is a set of attributes on the link.
- Attributes map[string]interface{}
-}
-
-// MessageEventType specifies the type of message event.
-type MessageEventType int32
-
-// MessageEventType values.
-const (
- MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
- MessageEventTypeSent // Indicates a sent RPC message.
- MessageEventTypeRecv // Indicates a received RPC message.
-)
-
-// MessageEvent represents an event describing a message sent or received on the network.
-type MessageEvent struct {
- Time time.Time
- EventType MessageEventType
- MessageID int64
- UncompressedByteSize int64
- CompressedByteSize int64
-}
-
-// Status is the status of a Span.
-type Status struct {
- // Code is a status code. Zero indicates success.
- //
- // If Code will be propagated to Google APIs, it ideally should be a value from
- // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
- Code int32
- Message string
-}
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
deleted file mode 100644
index 775f8274f..000000000
--- a/vendor/go.opencensus.io/trace/config.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
-
- "go.opencensus.io/trace/internal"
-)
-
-// Config represents the global tracing configuration.
-type Config struct {
- // DefaultSampler is the default sampler used when creating new spans.
- DefaultSampler Sampler
-
- // IDGenerator is for internal use only.
- IDGenerator internal.IDGenerator
-
- // MaxAnnotationEventsPerSpan is max number of annotation events per span
- MaxAnnotationEventsPerSpan int
-
- // MaxMessageEventsPerSpan is max number of message events per span
- MaxMessageEventsPerSpan int
-
-	// MaxAttributesPerSpan is max number of attributes per span
- MaxAttributesPerSpan int
-
- // MaxLinksPerSpan is max number of links per span
- MaxLinksPerSpan int
-}
-
-var configWriteMu sync.Mutex
-
-const (
- // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span
- DefaultMaxAnnotationEventsPerSpan = 32
-
- // DefaultMaxMessageEventsPerSpan is default max number of message events per span
- DefaultMaxMessageEventsPerSpan = 128
-
- // DefaultMaxAttributesPerSpan is default max number of attributes per span
- DefaultMaxAttributesPerSpan = 32
-
- // DefaultMaxLinksPerSpan is default max number of links per span
- DefaultMaxLinksPerSpan = 32
-)
-
-// ApplyConfig applies changes to the global tracing configuration.
-//
-// Fields not provided in the given config are preserved.
-func ApplyConfig(cfg Config) {
- configWriteMu.Lock()
- defer configWriteMu.Unlock()
- c := *config.Load().(*Config)
- if cfg.DefaultSampler != nil {
- c.DefaultSampler = cfg.DefaultSampler
- }
- if cfg.IDGenerator != nil {
- c.IDGenerator = cfg.IDGenerator
- }
- if cfg.MaxAnnotationEventsPerSpan > 0 {
- c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan
- }
- if cfg.MaxMessageEventsPerSpan > 0 {
- c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan
- }
- if cfg.MaxAttributesPerSpan > 0 {
- c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan
- }
- if cfg.MaxLinksPerSpan > 0 {
- c.MaxLinksPerSpan = cfg.MaxLinksPerSpan
- }
- config.Store(&c)
-}
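Note that ApplyConfig merges rather than replaces: zero-valued fields keep their current settings. A sketch:

	package main

	import "go.opencensus.io/trace"

	func main() {
		// Raise the attribute cap; the sampler and the other limits are
		// left at their previously configured values.
		trace.ApplyConfig(trace.Config{MaxAttributesPerSpan: 64})
	}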
diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go
deleted file mode 100644
index 7a1616a55..000000000
--- a/vendor/go.opencensus.io/trace/doc.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package trace contains support for OpenCensus distributed tracing.
-
-The following assumes a basic familiarity with OpenCensus concepts.
-See http://opencensus.io
-
-# Exporting Traces
-
-To export collected tracing data, register at least one exporter. You can use
-one of the provided exporters or write your own.
-
- trace.RegisterExporter(exporter)
-
-By default, traces will be sampled relatively rarely. To change the sampling
-frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
-to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
-
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
-
-Be careful about using trace.AlwaysSample in a production application with
-significant traffic: a new trace will be started and exported for every request.
-
-# Adding Spans to a Trace
-
-A trace consists of a tree of spans. In Go, the current span is carried in a
-context.Context.
-
-It is common to want to capture all the activity of a function call in a span. For
-this to work, the function must take a context.Context as a parameter. Add these two
-lines to the top of the function:
-
- ctx, span := trace.StartSpan(ctx, "example.com/Run")
- defer span.End()
-
-StartSpan will create a new top-level span if the context
-doesn't contain another span, otherwise it will create a child span.
-*/
-package trace // import "go.opencensus.io/trace"
diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go
deleted file mode 100644
index ffc264f23..000000000
--- a/vendor/go.opencensus.io/trace/evictedqueue.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-type evictedQueue struct {
- queue []interface{}
- capacity int
- droppedCount int
-}
-
-func newEvictedQueue(capacity int) *evictedQueue {
- eq := &evictedQueue{
- capacity: capacity,
- queue: make([]interface{}, 0),
- }
-
- return eq
-}
-
-func (eq *evictedQueue) add(value interface{}) {
- if len(eq.queue) == eq.capacity {
- eq.queue = eq.queue[1:]
- eq.droppedCount++
- }
- eq.queue = append(eq.queue, value)
-}
diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go
deleted file mode 100644
index e0d9a4b99..000000000
--- a/vendor/go.opencensus.io/trace/export.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Exporter is a type for functions that receive sampled trace spans.
-//
-// The ExportSpan method should be safe for concurrent use and should return
-// quickly; if an Exporter takes a significant amount of time to process a
-// SpanData, that work should be done on another goroutine.
-//
-// The SpanData should not be modified, but a pointer to it can be kept.
-type Exporter interface {
- ExportSpan(s *SpanData)
-}
-
-type exportersMap map[Exporter]struct{}
-
-var (
- exporterMu sync.Mutex
- exporters atomic.Value
-)
-
-// RegisterExporter adds to the list of Exporters that will receive sampled
-// trace spans.
-//
-// Binaries can register exporters; libraries shouldn't register exporters.
-func RegisterExporter(e Exporter) {
- exporterMu.Lock()
-	m := make(exportersMap)
-	if old, ok := exporters.Load().(exportersMap); ok {
-		for k, v := range old {
-			m[k] = v
-		}
-	}
-	m[e] = struct{}{}
-	exporters.Store(m)
- exporterMu.Unlock()
-}
-
-// UnregisterExporter removes the given Exporter from the list of Exporters
-// that receive sampled trace spans.
-func UnregisterExporter(e Exporter) {
- exporterMu.Lock()
-	m := make(exportersMap)
-	if old, ok := exporters.Load().(exportersMap); ok {
-		for k, v := range old {
-			m[k] = v
-		}
-	}
-	delete(m, e)
-	exporters.Store(m)
- exporterMu.Unlock()
-}
-
-// SpanData contains all the information collected by a Span.
-type SpanData struct {
- SpanContext
- ParentSpanID SpanID
- SpanKind int
- Name string
- StartTime time.Time
- // The wall clock time of EndTime will be adjusted to always be offset
- // from StartTime by the duration of the span.
- EndTime time.Time
- // The values of Attributes each have type string, bool, or int64.
- Attributes map[string]interface{}
- Annotations []Annotation
- MessageEvents []MessageEvent
- Status
- Links []Link
- HasRemoteParent bool
- DroppedAttributeCount int
- DroppedAnnotationCount int
- DroppedMessageEventCount int
- DroppedLinkCount int
-
-	// ChildSpanCount holds the number of child spans created for this span.
- ChildSpanCount int
-}
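A toy Exporter implementation, to show the contract described above (printExporter is hypothetical; real exporters hand SpanData off to another goroutine so ExportSpan returns quickly):

	package main

	import (
		"log"

		"go.opencensus.io/trace"
	)

	type printExporter struct{}

	// ExportSpan must be safe for concurrent use and must not modify sd.
	func (printExporter) ExportSpan(sd *trace.SpanData) {
		log.Printf("span %q trace=%s took %v", sd.Name, sd.TraceID, sd.EndTime.Sub(sd.StartTime))
	}

	func main() {
		trace.RegisterExporter(printExporter{})
		defer trace.UnregisterExporter(printExporter{})
		// Sampled spans ended from here on are delivered to printExporter.
	}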
diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go
deleted file mode 100644
index 7e808d8f3..000000000
--- a/vendor/go.opencensus.io/trace/internal/internal.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides trace internals.
-package internal
-
-// IDGenerator allows custom generators for TraceId and SpanId.
-type IDGenerator interface {
- NewTraceID() [16]byte
- NewSpanID() [8]byte
-}
diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go
deleted file mode 100644
index 80095a5f6..000000000
--- a/vendor/go.opencensus.io/trace/lrumap.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "github.com/golang/groupcache/lru"
-)
-
-// A simple lru.Cache wrapper that tracks the keys of the current contents and
-// the cumulative number of evicted items.
-type lruMap struct {
- cacheKeys map[lru.Key]bool
- cache *lru.Cache
- droppedCount int
-}
-
-func newLruMap(size int) *lruMap {
- lm := &lruMap{
- cacheKeys: make(map[lru.Key]bool),
- cache: lru.New(size),
- droppedCount: 0,
- }
- lm.cache.OnEvicted = func(key lru.Key, value interface{}) {
- delete(lm.cacheKeys, key)
- lm.droppedCount++
- }
- return lm
-}
-
-func (lm lruMap) len() int {
- return lm.cache.Len()
-}
-
-func (lm lruMap) keys() []interface{} {
- keys := make([]interface{}, 0, len(lm.cacheKeys))
- for k := range lm.cacheKeys {
- keys = append(keys, k)
- }
- return keys
-}
-
-func (lm *lruMap) add(key, value interface{}) {
- lm.cacheKeys[lru.Key(key)] = true
- lm.cache.Add(lru.Key(key), value)
-}
-
-func (lm *lruMap) get(key interface{}) (interface{}, bool) {
- return lm.cache.Get(key)
-}
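The wrapper delegates eviction accounting to groupcache's OnEvicted hook. A standalone sketch of that mechanism:

	package main

	import (
		"fmt"

		"github.com/golang/groupcache/lru"
	)

	func main() {
		dropped := 0
		c := lru.New(2) // capacity 2, like MaxAttributesPerSpan
		c.OnEvicted = func(key lru.Key, value interface{}) { dropped++ }

		c.Add("a", 1)
		c.Add("b", 2)
		c.Add("c", 3) // evicts "a", the least recently used entry

		_, ok := c.Get("a")
		fmt.Println(ok, dropped) // false 1
	}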
diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go
deleted file mode 100644
index 1eb190a96..000000000
--- a/vendor/go.opencensus.io/trace/propagation/propagation.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package propagation implements the binary trace context format.
-package propagation // import "go.opencensus.io/trace/propagation"
-
-// TODO: link to external spec document.
-
-// BinaryFormat format:
-//
-// Binary value: <version_id><version_format>
-// version_id: 1 byte representing the version id.
-//
-// For version_id = 0:
-//
-// version_format: <field><field>
-// field_format: <field_id><field_format>
-//
-// Fields:
-//
-// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id.
-// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id.
-// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options.
-//
-// Fields MUST be encoded using the field id order (smaller to higher).
-//
-// Valid value example:
-//
-// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97,
-// 98, 99, 100, 101, 102, 103, 104, 2, 1}
-//
-// version_id = 0;
-// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}
-// span_id = {97, 98, 99, 100, 101, 102, 103, 104};
-// trace_options = {1};
-
-import (
- "net/http"
-
- "go.opencensus.io/trace"
-)
-
-// Binary returns the binary format representation of a SpanContext.
-//
-// If sc is the zero value, Binary returns nil.
-func Binary(sc trace.SpanContext) []byte {
- if sc == (trace.SpanContext{}) {
- return nil
- }
- var b [29]byte
- copy(b[2:18], sc.TraceID[:])
- b[18] = 1
- copy(b[19:27], sc.SpanID[:])
- b[27] = 2
- b[28] = uint8(sc.TraceOptions)
- return b[:]
-}
-
-// FromBinary returns the SpanContext represented by b.
-//
-// If b has an unsupported version ID or contains no TraceID, FromBinary
-// returns with ok==false.
-func FromBinary(b []byte) (sc trace.SpanContext, ok bool) {
- if len(b) == 0 || b[0] != 0 {
- return trace.SpanContext{}, false
- }
- b = b[1:]
- if len(b) >= 17 && b[0] == 0 {
- copy(sc.TraceID[:], b[1:17])
- b = b[17:]
- } else {
- return trace.SpanContext{}, false
- }
- if len(b) >= 9 && b[0] == 1 {
- copy(sc.SpanID[:], b[1:9])
- b = b[9:]
- }
- if len(b) >= 2 && b[0] == 2 {
- sc.TraceOptions = trace.TraceOptions(b[1])
- }
- return sc, true
-}
-
-// HTTPFormat implementations propagate span contexts
-// in HTTP requests.
-//
-// SpanContextFromRequest extracts a span context from incoming
-// requests.
-//
-// SpanContextToRequest modifies the given request to include the given
-// span context.
-type HTTPFormat interface {
- SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool)
- SpanContextToRequest(sc trace.SpanContext, req *http.Request)
-}
-
-// TODO(jbd): Find a more representative but short name for HTTPFormat.
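A round trip through the binary format, using the same example bytes as the comment above:

	package main

	import (
		"fmt"

		"go.opencensus.io/trace"
		"go.opencensus.io/trace/propagation"
	)

	func main() {
		sc := trace.SpanContext{
			TraceID:      trace.TraceID{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79},
			SpanID:       trace.SpanID{97, 98, 99, 100, 101, 102, 103, 104},
			TraceOptions: trace.TraceOptions(1),
		}

		wire := propagation.Binary(sc) // 29 bytes for a non-zero context
		back, ok := propagation.FromBinary(wire)
		fmt.Println(ok, back.TraceID == sc.TraceID, back.SpanID == sc.SpanID) // true true true
	}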
diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go
deleted file mode 100644
index 71c10f9e3..000000000
--- a/vendor/go.opencensus.io/trace/sampling.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "encoding/binary"
-)
-
-const defaultSamplingProbability = 1e-4
-
-// Sampler decides whether a trace should be sampled and exported.
-type Sampler func(SamplingParameters) SamplingDecision
-
-// SamplingParameters contains the values passed to a Sampler.
-type SamplingParameters struct {
- ParentContext SpanContext
- TraceID TraceID
- SpanID SpanID
- Name string
- HasRemoteParent bool
-}
-
-// SamplingDecision is the value returned by a Sampler.
-type SamplingDecision struct {
- Sample bool
-}
-
-// ProbabilitySampler returns a Sampler that samples a given fraction of traces.
-//
-// It also samples spans whose parents are sampled.
-func ProbabilitySampler(fraction float64) Sampler {
- if !(fraction >= 0) {
- fraction = 0
- } else if fraction >= 1 {
- return AlwaysSample()
- }
-
- traceIDUpperBound := uint64(fraction * (1 << 63))
- return Sampler(func(p SamplingParameters) SamplingDecision {
- if p.ParentContext.IsSampled() {
- return SamplingDecision{Sample: true}
- }
- x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
- return SamplingDecision{Sample: x < traceIDUpperBound}
- })
-}
-
-// AlwaysSample returns a Sampler that samples every trace.
-// Be careful about using this sampler in a production application with
-// significant traffic: a new trace will be started and exported for every
-// request.
-func AlwaysSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: true}
- }
-}
-
-// NeverSample returns a Sampler that samples no traces.
-func NeverSample() Sampler {
- return func(p SamplingParameters) SamplingDecision {
- return SamplingDecision{Sample: false}
- }
-}
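Typical sampler wiring, combining the global default with a per-span override:

	package main

	import (
		"context"

		"go.opencensus.io/trace"
	)

	func main() {
		// Sample roughly 1% of new traces program-wide.
		trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(0.01)})

		// Always sample this particular operation, regardless of the default.
		ctx, span := trace.StartSpan(context.Background(), "example.com/Checkout",
			trace.WithSampler(trace.AlwaysSample()))
		defer span.End()
		_ = ctx
	}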
diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go
deleted file mode 100644
index fbabad34c..000000000
--- a/vendor/go.opencensus.io/trace/spanbucket.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "time"
-)
-
-// samplePeriod is the minimum time between accepting spans in a single bucket.
-const samplePeriod = time.Second
-
-// defaultLatencies contains the default latency bucket bounds.
-// TODO: consider defaults, make configurable
-var defaultLatencies = [...]time.Duration{
- 10 * time.Microsecond,
- 100 * time.Microsecond,
- time.Millisecond,
- 10 * time.Millisecond,
- 100 * time.Millisecond,
- time.Second,
- 10 * time.Second,
- time.Minute,
-}
-
-// bucket is a container for a set of spans for a particular error code or latency range.
-type bucket struct {
- nextTime time.Time // next time we can accept a span
- buffer []*SpanData // circular buffer of spans
- nextIndex int // location next SpanData should be placed in buffer
- overflow bool // whether the circular buffer has wrapped around
-}
-
-func makeBucket(bufferSize int) bucket {
- return bucket{
- buffer: make([]*SpanData, bufferSize),
- }
-}
-
-// add adds a span to the bucket, if nextTime has been reached.
-func (b *bucket) add(s *SpanData) {
- if s.EndTime.Before(b.nextTime) {
- return
- }
- if len(b.buffer) == 0 {
- return
- }
- b.nextTime = s.EndTime.Add(samplePeriod)
- b.buffer[b.nextIndex] = s
- b.nextIndex++
- if b.nextIndex == len(b.buffer) {
- b.nextIndex = 0
- b.overflow = true
- }
-}
-
-// size returns the number of spans in the bucket.
-func (b *bucket) size() int {
- if b.overflow {
- return len(b.buffer)
- }
- return b.nextIndex
-}
-
-// span returns the ith span in the bucket.
-func (b *bucket) span(i int) *SpanData {
- if !b.overflow {
- return b.buffer[i]
- }
- if i < len(b.buffer)-b.nextIndex {
- return b.buffer[b.nextIndex+i]
- }
- return b.buffer[b.nextIndex+i-len(b.buffer)]
-}
-
-// resize changes the size of the bucket to n, keeping up to n existing spans.
-func (b *bucket) resize(n int) {
- cur := b.size()
- newBuffer := make([]*SpanData, n)
- if cur < n {
- for i := 0; i < cur; i++ {
- newBuffer[i] = b.span(i)
- }
- b.buffer = newBuffer
- b.nextIndex = cur
- b.overflow = false
- return
- }
- for i := 0; i < n; i++ {
- newBuffer[i] = b.span(i + cur - n)
- }
- b.buffer = newBuffer
- b.nextIndex = 0
- b.overflow = true
-}
-
-// latencyBucket returns the appropriate bucket number for a given latency.
-func latencyBucket(latency time.Duration) int {
- i := 0
- for i < len(defaultLatencies) && latency >= defaultLatencies[i] {
- i++
- }
- return i
-}
-
-// latencyBucketBounds returns the lower and upper bounds for a latency bucket
-// number.
-//
-// The lower bound is inclusive, the upper bound is exclusive (except for the
-// last bucket.)
-func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) {
- if index == 0 {
- return 0, defaultLatencies[index]
- }
- if index == len(defaultLatencies) {
- return defaultLatencies[index-1], 1<<63 - 1
- }
- return defaultLatencies[index-1], defaultLatencies[index]
-}
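A worked example of the bucketing: a 50ms span satisfies latency >= bound for the first four bounds (10µs, 100µs, 1ms, 10ms) and not the fifth, so it lands in bucket 4, whose bounds are [10ms, 100ms). A standalone sketch of the same lookup:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		bounds := []time.Duration{ // mirrors defaultLatencies above
			10 * time.Microsecond, 100 * time.Microsecond,
			time.Millisecond, 10 * time.Millisecond,
			100 * time.Millisecond, time.Second,
			10 * time.Second, time.Minute,
		}
		latency := 50 * time.Millisecond
		i := 0
		for i < len(bounds) && latency >= bounds[i] {
			i++
		}
		fmt.Println(i) // 4
	}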
diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go
deleted file mode 100644
index e601f76f2..000000000
--- a/vendor/go.opencensus.io/trace/spanstore.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "sync"
- "time"
-
- "go.opencensus.io/internal"
-)
-
-const (
- maxBucketSize = 100000
- defaultBucketSize = 10
-)
-
-var (
- ssmu sync.RWMutex // protects spanStores
- spanStores = make(map[string]*spanStore)
-)
-
-// This exists purely to avoid exposing internal methods used by z-Pages externally.
-type internalOnly struct{}
-
-func init() {
- //TODO(#412): remove
- internal.Trace = &internalOnly{}
-}
-
-// ReportActiveSpans returns the active spans for the given name.
-func (i internalOnly) ReportActiveSpans(name string) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for activeSpan := range s.active {
- if s, ok := activeSpan.(*span); ok {
- out = append(out, s.makeSpanData())
- }
- }
- return out
-}
-
-// ReportSpansByError returns a sample of error spans.
-//
-// If code is nonzero, only spans with that status code are returned.
-func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- if code != 0 {
- if b, ok := s.errors[code]; ok {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- } else {
- for _, b := range s.errors {
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- out = append(out, sd)
- }
- }
- }
- return out
-}
-
-// ConfigureBucketSizes sets the number of spans to keep per latency and error
-// bucket for different span names.
-func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) {
- for _, bc := range bcs {
- latencyBucketSize := bc.MaxRequestsSucceeded
- if latencyBucketSize < 0 {
- latencyBucketSize = 0
- }
- if latencyBucketSize > maxBucketSize {
- latencyBucketSize = maxBucketSize
- }
- errorBucketSize := bc.MaxRequestsErrors
- if errorBucketSize < 0 {
- errorBucketSize = 0
- }
- if errorBucketSize > maxBucketSize {
- errorBucketSize = maxBucketSize
- }
- spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize)
- }
-}
-
-// ReportSpansPerMethod returns a summary of what spans are being stored for each span name.
-func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary {
- out := make(map[string]internal.PerMethodSummary)
- ssmu.RLock()
- defer ssmu.RUnlock()
- for name, s := range spanStores {
- s.mu.Lock()
- p := internal.PerMethodSummary{
- Active: len(s.active),
- }
- for code, b := range s.errors {
- p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{
- ErrorCode: code,
- Size: b.size(),
- })
- }
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{
- MinLatency: min,
- MaxLatency: max,
- Size: b.size(),
- })
- }
- s.mu.Unlock()
- out[name] = p
- }
- return out
-}
-
-// ReportSpansByLatency returns a sample of successful spans.
-//
-// minLatency is the minimum latency of spans to be returned.
-// maxLatency, if nonzero, is the maximum latency of spans to be returned.
-func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData {
- s := spanStoreForName(name)
- if s == nil {
- return nil
- }
- var out []*SpanData
- s.mu.Lock()
- defer s.mu.Unlock()
- for i, b := range s.latency {
- min, max := latencyBucketBounds(i)
- if i+1 != len(s.latency) && max <= minLatency {
- continue
- }
- if maxLatency != 0 && maxLatency < min {
- continue
- }
- for _, sd := range b.buffer {
- if sd == nil {
- break
- }
- if minLatency != 0 || maxLatency != 0 {
- d := sd.EndTime.Sub(sd.StartTime)
- if d < minLatency {
- continue
- }
- if maxLatency != 0 && d > maxLatency {
- continue
- }
- }
- out = append(out, sd)
- }
- }
- return out
-}
-
-// spanStore keeps track of spans stored for a particular span name.
-//
-// It contains all active spans; a sample of spans for failed requests,
-// categorized by error code; and a sample of spans for successful requests,
-// bucketed by latency.
-type spanStore struct {
- mu sync.Mutex // protects everything below.
- active map[SpanInterface]struct{}
- errors map[int32]*bucket
- latency []bucket
- maxSpansPerErrorBucket int
-}
-
-// newSpanStore creates a span store.
-func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore {
- s := &spanStore{
- active: make(map[SpanInterface]struct{}),
- latency: make([]bucket, len(defaultLatencies)+1),
- maxSpansPerErrorBucket: errorBucketSize,
- }
- for i := range s.latency {
- s.latency[i] = makeBucket(latencyBucketSize)
- }
- return s
-}
-
-// spanStoreForName returns the spanStore for the given name.
-//
-// It returns nil if it doesn't exist.
-func spanStoreForName(name string) *spanStore {
- var s *spanStore
- ssmu.RLock()
-	s = spanStores[name]
- ssmu.RUnlock()
- return s
-}
-
-// spanStoreForNameCreateIfNew returns the spanStore for the given name.
-//
-// It creates it if it didn't exist.
-func spanStoreForNameCreateIfNew(name string) *spanStore {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- return s
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- return s
- }
- s = newSpanStore(name, defaultBucketSize, defaultBucketSize)
- spanStores[name] = s
- return s
-}
-
-// spanStoreSetSize resizes the spanStore for the given name.
-//
-// It creates it if it didn't exist.
-func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) {
- ssmu.RLock()
- s, ok := spanStores[name]
- ssmu.RUnlock()
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- ssmu.Lock()
- defer ssmu.Unlock()
- s, ok = spanStores[name]
- if ok {
- s.resize(latencyBucketSize, errorBucketSize)
- return
- }
- s = newSpanStore(name, latencyBucketSize, errorBucketSize)
- spanStores[name] = s
-}
-
-func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) {
- s.mu.Lock()
- for i := range s.latency {
- s.latency[i].resize(latencyBucketSize)
- }
- for _, b := range s.errors {
- b.resize(errorBucketSize)
- }
- s.maxSpansPerErrorBucket = errorBucketSize
- s.mu.Unlock()
-}
-
-// add adds a span to the active bucket of the spanStore.
-func (s *spanStore) add(span SpanInterface) {
- s.mu.Lock()
- s.active[span] = struct{}{}
- s.mu.Unlock()
-}
-
-// finished removes a span from the active set, and adds a corresponding
-// SpanData to a latency or error bucket.
-func (s *spanStore) finished(span SpanInterface, sd *SpanData) {
- latency := sd.EndTime.Sub(sd.StartTime)
- if latency < 0 {
- latency = 0
- }
- code := sd.Status.Code
-
- s.mu.Lock()
- delete(s.active, span)
- if code == 0 {
- s.latency[latencyBucket(latency)].add(sd)
- } else {
- if s.errors == nil {
- s.errors = make(map[int32]*bucket)
- }
- if b := s.errors[code]; b != nil {
- b.add(sd)
- } else {
- b := makeBucket(s.maxSpansPerErrorBucket)
- s.errors[code] = &b
- b.add(sd)
- }
- }
- s.mu.Unlock()
-}
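These stores back the z-Pages /tracez view. Assuming the go.opencensus.io/zpages package (which enables the local span store), a typical way to expose them:

	package main

	import (
		"log"
		"net/http"

		"go.opencensus.io/zpages"
	)

	func main() {
		mux := http.NewServeMux()
		zpages.Handle(mux, "/debug") // serves /debug/tracez, backed by spanStore
		log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux))
	}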
diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go
deleted file mode 100644
index ec60effd1..000000000
--- a/vendor/go.opencensus.io/trace/status_codes.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-// Status codes for use with Span.SetStatus. These correspond to the status
-// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-const (
- StatusCodeOK = 0
- StatusCodeCancelled = 1
- StatusCodeUnknown = 2
- StatusCodeInvalidArgument = 3
- StatusCodeDeadlineExceeded = 4
- StatusCodeNotFound = 5
- StatusCodeAlreadyExists = 6
- StatusCodePermissionDenied = 7
- StatusCodeResourceExhausted = 8
- StatusCodeFailedPrecondition = 9
- StatusCodeAborted = 10
- StatusCodeOutOfRange = 11
- StatusCodeUnimplemented = 12
- StatusCodeInternal = 13
- StatusCodeUnavailable = 14
- StatusCodeDataLoss = 15
- StatusCodeUnauthenticated = 16
-)
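These codes are set via Span.SetStatus; a non-zero code routes the finished span into the per-code error buckets of the span store. A sketch (fetch is a hypothetical stand-in):

	package main

	import (
		"context"
		"errors"

		"go.opencensus.io/trace"
	)

	func fetch(ctx context.Context) error { return errors.New("backend down") }

	func main() {
		ctx, span := trace.StartSpan(context.Background(), "example.com/Fetch")
		defer span.End()

		if err := fetch(ctx); err != nil {
			span.SetStatus(trace.Status{
				Code:    trace.StatusCodeUnavailable,
				Message: err.Error(),
			})
		}
	}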
diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go
deleted file mode 100644
index 861df9d39..000000000
--- a/vendor/go.opencensus.io/trace/trace.go
+++ /dev/null
@@ -1,595 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
- crand "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/rand"
- "sync"
- "sync/atomic"
- "time"
-
- "go.opencensus.io/internal"
- "go.opencensus.io/trace/tracestate"
-)
-
-type tracer struct{}
-
-var _ Tracer = &tracer{}
-
-// Span represents a span of a trace. It has an associated SpanContext, and
-// stores data accumulated while the span is active.
-//
-// Ideally users should interact with Spans by calling the functions in this
-// package that take a Context parameter.
-type span struct {
- // data contains information recorded about the span.
- //
- // It will be non-nil if we are exporting the span or recording events for it.
- // Otherwise, data is nil, and the Span is simply a carrier for the
- // SpanContext, so that the trace ID is propagated.
- data *SpanData
- mu sync.Mutex // protects the contents of *data (but not the pointer value.)
- spanContext SpanContext
-
-	// lruAttributes are capped at the configured limit. When the capacity is
-	// reached, the oldest entry is removed to make room for a new one.
- lruAttributes *lruMap
-
- // annotations are stored in FIFO queue capped by configured limit.
- annotations *evictedQueue
-
- // messageEvents are stored in FIFO queue capped by configured limit.
- messageEvents *evictedQueue
-
- // links are stored in FIFO queue capped by configured limit.
- links *evictedQueue
-
- // spanStore is the spanStore this span belongs to, if any, otherwise it is nil.
- *spanStore
- endOnce sync.Once
-
- executionTracerTaskEnd func() // ends the execution tracer span
-}
-
-// IsRecordingEvents returns true if events are being recorded for this span.
-// Use this check to avoid computing expensive annotations when they will never
-// be used.
-func (s *span) IsRecordingEvents() bool {
- if s == nil {
- return false
- }
- return s.data != nil
-}
-
-// TraceOptions contains options associated with a trace span.
-type TraceOptions uint32
-
-// IsSampled returns true if the span will be exported.
-func (sc SpanContext) IsSampled() bool {
- return sc.TraceOptions.IsSampled()
-}
-
-// setIsSampled sets the TraceOptions bit that determines whether the span will be exported.
-func (sc *SpanContext) setIsSampled(sampled bool) {
- if sampled {
- sc.TraceOptions |= 1
- } else {
- sc.TraceOptions &= ^TraceOptions(1)
- }
-}
-
-// IsSampled returns true if the span will be exported.
-func (t TraceOptions) IsSampled() bool {
- return t&1 == 1
-}
-
-// SpanContext contains the state that must propagate across process boundaries.
-//
-// SpanContext is not an implementation of context.Context.
-// TODO: add reference to external Census docs for SpanContext.
-type SpanContext struct {
- TraceID TraceID
- SpanID SpanID
- TraceOptions TraceOptions
- Tracestate *tracestate.Tracestate
-}
-
-type contextKey struct{}
-
-// FromContext returns the Span stored in a context, or nil if there isn't one.
-func (t *tracer) FromContext(ctx context.Context) *Span {
- s, _ := ctx.Value(contextKey{}).(*Span)
- return s
-}
-
-// NewContext returns a new context with the given Span attached.
-func (t *tracer) NewContext(parent context.Context, s *Span) context.Context {
- return context.WithValue(parent, contextKey{}, s)
-}
-
-// All available span kinds. Span kind must be one of these values.
-const (
- SpanKindUnspecified = iota
- SpanKindServer
- SpanKindClient
-)
-
-// StartOptions contains options concerning how a span is started.
-type StartOptions struct {
- // Sampler to consult for this Span. If provided, it is always consulted.
- //
- // If not provided, then the behavior differs based on whether
- // the parent of this Span is remote, local, or there is no parent.
- // In the case of a remote parent or no parent, the
- // default sampler (see Config) will be consulted. Otherwise,
- // when there is a non-remote parent, no new sampling decision will be made:
- // we will preserve the sampling of the parent.
- Sampler Sampler
-
- // SpanKind represents the kind of a span. If none is set,
- // SpanKindUnspecified is used.
- SpanKind int
-}
-
-// StartOption applies changes to StartOptions.
-type StartOption func(*StartOptions)
-
-// WithSpanKind causes new spans to be created with the given kind.
-func WithSpanKind(spanKind int) StartOption {
- return func(o *StartOptions) {
- o.SpanKind = spanKind
- }
-}
-
-// WithSampler causes new spans to be created with a custom sampler.
-// Otherwise, the global sampler is used.
-func WithSampler(sampler Sampler) StartOption {
- return func(o *StartOptions) {
- o.Sampler = sampler
- }
-}
-
-// StartSpan starts a new child span of the current span in the context. If
-// there is no span in the context, creates a new trace and span.
-//
-// The returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- var parent SpanContext
- if p := t.FromContext(ctx); p != nil {
- if ps, ok := p.internal.(*span); ok {
- ps.addChild()
- }
- parent = p.SpanContext()
- }
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
-
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- extSpan := NewSpan(span)
- return t.NewContext(ctx, extSpan), extSpan
-}
-
-// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
-//
-// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
-// preferred for cases where the parent is propagated via an incoming request.
-//
-// The returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
- var opts StartOptions
- for _, op := range o {
- op(&opts)
- }
- span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts)
- ctx, end := startExecutionTracerTask(ctx, name)
- span.executionTracerTaskEnd = end
- extSpan := NewSpan(span)
- return t.NewContext(ctx, extSpan), extSpan
-}
-
-func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span {
- s := &span{}
- s.spanContext = parent
-
- cfg := config.Load().(*Config)
- if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok {
- // lazy initialization
- gen.init()
- }
-
- if !hasParent {
- s.spanContext.TraceID = cfg.IDGenerator.NewTraceID()
- }
- s.spanContext.SpanID = cfg.IDGenerator.NewSpanID()
- sampler := cfg.DefaultSampler
-
- if !hasParent || remoteParent || o.Sampler != nil {
- // If this span is the child of a local span and no Sampler is set in the
- // options, keep the parent's TraceOptions.
- //
- // Otherwise, consult the Sampler in the options if it is non-nil, otherwise
- // the default sampler.
- if o.Sampler != nil {
- sampler = o.Sampler
- }
- s.spanContext.setIsSampled(sampler(SamplingParameters{
- ParentContext: parent,
- TraceID: s.spanContext.TraceID,
- SpanID: s.spanContext.SpanID,
- Name: name,
- HasRemoteParent: remoteParent}).Sample)
- }
-
- if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() {
- return s
- }
-
- s.data = &SpanData{
- SpanContext: s.spanContext,
- StartTime: time.Now(),
- SpanKind: o.SpanKind,
- Name: name,
- HasRemoteParent: remoteParent,
- }
- s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan)
- s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan)
- s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan)
- s.links = newEvictedQueue(cfg.MaxLinksPerSpan)
-
- if hasParent {
- s.data.ParentSpanID = parent.SpanID
- }
- if internal.LocalSpanStoreEnabled {
- ss := spanStoreForNameCreateIfNew(name)
- if ss != nil {
- s.spanStore = ss
- ss.add(s)
- }
- }
-
- return s
-}
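
The sampling branch above consults the option sampler first, falls back to the default, and only re-samples at trace roots, remote parents, or explicit overrides. A sketch of a custom Sampler a caller could have installed via ApplyConfig; the "/healthz" route name and the 1% rate are illustrative assumptions:

```go
package main

import "go.opencensus.io/trace"

// healthzFilter drops spans for a hypothetical "/healthz" route, keeps the
// parent's decision when one exists, and otherwise samples 1% of traces.
func healthzFilter(p trace.SamplingParameters) trace.SamplingDecision {
	if p.Name == "/healthz" {
		return trace.SamplingDecision{Sample: false}
	}
	if p.ParentContext.IsSampled() {
		return trace.SamplingDecision{Sample: true}
	}
	return trace.ProbabilitySampler(0.01)(p)
}

func main() {
	trace.ApplyConfig(trace.Config{DefaultSampler: healthzFilter})
}
```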
-
-// End ends the span.
-func (s *span) End() {
- if s == nil {
- return
- }
- if s.executionTracerTaskEnd != nil {
- s.executionTracerTaskEnd()
- }
- if !s.IsRecordingEvents() {
- return
- }
- s.endOnce.Do(func() {
- exp, _ := exporters.Load().(exportersMap)
- mustExport := s.spanContext.IsSampled() && len(exp) > 0
- if s.spanStore != nil || mustExport {
- sd := s.makeSpanData()
- sd.EndTime = internal.MonotonicEndTime(sd.StartTime)
- if s.spanStore != nil {
- s.spanStore.finished(s, sd)
- }
- if mustExport {
- for e := range exp {
- e.ExportSpan(sd)
- }
- }
- }
- })
-}
-
-// makeSpanData produces a SpanData representing the current state of the Span.
-// It requires that s.data is non-nil.
-func (s *span) makeSpanData() *SpanData {
- var sd SpanData
- s.mu.Lock()
- sd = *s.data
- if s.lruAttributes.len() > 0 {
- sd.Attributes = s.lruAttributesToAttributeMap()
- sd.DroppedAttributeCount = s.lruAttributes.droppedCount
- }
- if len(s.annotations.queue) > 0 {
- sd.Annotations = s.interfaceArrayToAnnotationArray()
- sd.DroppedAnnotationCount = s.annotations.droppedCount
- }
- if len(s.messageEvents.queue) > 0 {
- sd.MessageEvents = s.interfaceArrayToMessageEventArray()
- sd.DroppedMessageEventCount = s.messageEvents.droppedCount
- }
- if len(s.links.queue) > 0 {
- sd.Links = s.interfaceArrayToLinksArray()
- sd.DroppedLinkCount = s.links.droppedCount
- }
- s.mu.Unlock()
- return &sd
-}
-
-// SpanContext returns the SpanContext of the span.
-func (s *span) SpanContext() SpanContext {
- if s == nil {
- return SpanContext{}
- }
- return s.spanContext
-}
-
-// SetName sets the name of the span, if it is recording events.
-func (s *span) SetName(name string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Name = name
- s.mu.Unlock()
-}
-
-// SetStatus sets the status of the span, if it is recording events.
-func (s *span) SetStatus(status Status) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.Status = status
- s.mu.Unlock()
-}
-
-func (s *span) interfaceArrayToLinksArray() []Link {
- linksArr := make([]Link, 0, len(s.links.queue))
- for _, value := range s.links.queue {
- linksArr = append(linksArr, value.(Link))
- }
- return linksArr
-}
-
-func (s *span) interfaceArrayToMessageEventArray() []MessageEvent {
- messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue))
- for _, value := range s.messageEvents.queue {
- messageEventArr = append(messageEventArr, value.(MessageEvent))
- }
- return messageEventArr
-}
-
-func (s *span) interfaceArrayToAnnotationArray() []Annotation {
- annotationArr := make([]Annotation, 0, len(s.annotations.queue))
- for _, value := range s.annotations.queue {
- annotationArr = append(annotationArr, value.(Annotation))
- }
- return annotationArr
-}
-
-func (s *span) lruAttributesToAttributeMap() map[string]interface{} {
- attributes := make(map[string]interface{}, s.lruAttributes.len())
- for _, key := range s.lruAttributes.keys() {
- value, ok := s.lruAttributes.get(key)
- if ok {
- keyStr := key.(string)
- attributes[keyStr] = value
- }
- }
- return attributes
-}
-
-func (s *span) copyToCappedAttributes(attributes []Attribute) {
- for _, a := range attributes {
- s.lruAttributes.add(a.key, a.value)
- }
-}
-
-func (s *span) addChild() {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.data.ChildSpanCount++
- s.mu.Unlock()
-}
-
-// AddAttributes sets attributes in the span.
-//
-// Existing attributes whose keys appear in the attributes parameter are overwritten.
-func (s *span) AddAttributes(attributes ...Attribute) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.copyToCappedAttributes(attributes)
- s.mu.Unlock()
-}
-
-func (s *span) printStringInternal(attributes []Attribute, str string) {
- now := time.Now()
- var am map[string]interface{}
- if len(attributes) != 0 {
- am = make(map[string]interface{}, len(attributes))
- for _, attr := range attributes {
- am[attr.key] = attr.value
- }
- }
- s.mu.Lock()
- s.annotations.add(Annotation{
- Time: now,
- Message: str,
- Attributes: am,
- })
- s.mu.Unlock()
-}
-
-// Annotate adds an annotation with attributes.
-// Attributes can be nil.
-func (s *span) Annotate(attributes []Attribute, str string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.printStringInternal(attributes, str)
-}
-
-// Annotatef adds an annotation with attributes.
-func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
- if !s.IsRecordingEvents() {
- return
- }
- s.printStringInternal(attributes, fmt.Sprintf(format, a...))
-}
-
-// AddMessageSendEvent adds a message send event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows correlating a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.messageEvents.add(MessageEvent{
- Time: now,
- EventType: MessageEventTypeSent,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
-
-// AddMessageReceiveEvent adds a message receive event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows correlating a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- now := time.Now()
- s.mu.Lock()
- s.messageEvents.add(MessageEvent{
- Time: now,
- EventType: MessageEventTypeRecv,
- MessageID: messageID,
- UncompressedByteSize: uncompressedByteSize,
- CompressedByteSize: compressedByteSize,
- })
- s.mu.Unlock()
-}
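
The send/receive pair above is only useful when both sides agree on the message ID. A sketch of the receiving side under that convention; the helper name and attribute key are invented:

```go
package main

import (
	"context"

	"go.opencensus.io/trace"
)

// recordReceive is a hypothetical helper: the sequence number doubles as the
// message ID, matching the sender's AddMessageSendEvent for the same message.
func recordReceive(ctx context.Context, seq int64, payload []byte) {
	span := trace.FromContext(ctx)
	// No compression in this sketch, so both sizes are the raw length.
	span.AddMessageReceiveEvent(seq, int64(len(payload)), int64(len(payload)))
	span.Annotate([]trace.Attribute{
		trace.Int64Attribute("payload.bytes", int64(len(payload))),
	}, "message processed")
}

func main() {
	ctx, span := trace.StartSpan(context.Background(), "recv",
		trace.WithSampler(trace.AlwaysSample()))
	defer span.End()
	recordReceive(ctx, 1, []byte("hello"))
}
```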
-
-// AddLink adds a link to the span.
-func (s *span) AddLink(l Link) {
- if !s.IsRecordingEvents() {
- return
- }
- s.mu.Lock()
- s.links.add(l)
- s.mu.Unlock()
-}
-
-func (s *span) String() string {
- if s == nil {
- return ""
- }
- if s.data == nil {
- return fmt.Sprintf("span %s", s.spanContext.SpanID)
- }
- s.mu.Lock()
- str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name)
- s.mu.Unlock()
- return str
-}
-
-var config atomic.Value // access atomically
-
-func init() {
- config.Store(&Config{
- DefaultSampler: ProbabilitySampler(defaultSamplingProbability),
- IDGenerator: &defaultIDGenerator{},
- MaxAttributesPerSpan: DefaultMaxAttributesPerSpan,
- MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan,
- MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan,
- MaxLinksPerSpan: DefaultMaxLinksPerSpan,
- })
-}
-
-type defaultIDGenerator struct {
- sync.Mutex
-
- // Please keep these as the first fields
- // so that these 8 byte fields will be aligned on addresses
- // divisible by 8, on both 32-bit and 64-bit machines when
- // performing atomic increments and accesses.
- // See:
- // * https://github.com/census-instrumentation/opencensus-go/issues/587
- // * https://github.com/census-instrumentation/opencensus-go/issues/865
- // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG
- nextSpanID uint64
- spanIDInc uint64
-
- traceIDAdd [2]uint64
- traceIDRand *rand.Rand
-
- initOnce sync.Once
-}
-
-// init initializes the generator on the first call to avoid consuming entropy
-// unnecessarily.
-func (gen *defaultIDGenerator) init() {
- gen.initOnce.Do(func() {
- // initialize traceID and spanID generators.
- var rngSeed int64
- for _, p := range []interface{}{
- &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
- } {
- binary.Read(crand.Reader, binary.LittleEndian, p)
- }
- gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
- gen.spanIDInc |= 1
- })
-}
-
-// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *defaultIDGenerator) NewSpanID() [8]byte {
- var id uint64
- for id == 0 {
- id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
- }
- var sid [8]byte
- binary.LittleEndian.PutUint64(sid[:], id)
- return sid
-}
-
-// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
-// It is safe for concurrent use; the generator's mutex is taken internally.
-func (gen *defaultIDGenerator) NewTraceID() [16]byte {
- var tid [16]byte
- // Construct the trace ID from two outputs of traceIDRand, with a constant
- // added to each half for additional entropy.
- gen.Lock()
- binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
- binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
- gen.Unlock()
- return tid
-}
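
The generator above seeds from crypto/rand once, then hands out span IDs lock-free: a random odd increment added atomically visits every non-zero 64-bit value before repeating. A self-contained sketch of the same scheme, with fixed constants standing in for the randomly seeded values:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"sync/atomic"
)

// Fixed constants stand in for values read from crypto/rand; the increment
// is forced odd so the walk covers all 2^64 states before repeating.
var (
	nextSpanID uint64 = 0x123456789abcdef0
	spanIDInc  uint64 = 0x9e3779b97f4a7c15 | 1
)

func newSpanID() [8]byte {
	var id uint64
	for id == 0 { // zero is reserved as the invalid span ID
		id = atomic.AddUint64(&nextSpanID, spanIDInc)
	}
	var sid [8]byte
	binary.LittleEndian.PutUint64(sid[:], id)
	return sid
}

func main() {
	fmt.Printf("%x %x\n", newSpanID(), newSpanID())
}
```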
diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go
deleted file mode 100644
index 9e2c3a999..000000000
--- a/vendor/go.opencensus.io/trace/trace_api.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2020, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
-)
-
-// DefaultTracer is the tracer used when package-level exported functions are invoked.
-var DefaultTracer Tracer = &tracer{}
-
-// Tracer can start spans and access context functions.
-type Tracer interface {
-
- // StartSpan starts a new child span of the current span in the context. If
- // there is no span in the context, it creates a new trace and span.
- //
- // Returned context contains the newly created span. You can use it to
- // propagate the returned span in process.
- StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span)
-
- // StartSpanWithRemoteParent starts a new child span of the span from the given parent.
- //
- // If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
- // preferred for cases where the parent is propagated via an incoming request.
- //
- // Returned context contains the newly created span. You can use it to
- // propagate the returned span in process.
- StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span)
-
- // FromContext returns the Span stored in a context, or nil if there isn't one.
- FromContext(ctx context.Context) *Span
-
- // NewContext returns a new context with the given Span attached.
- NewContext(parent context.Context, s *Span) context.Context
-}
-
-// StartSpan starts a new child span of the current span in the context. If
-// there is no span in the context, it creates a new trace and span.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
- return DefaultTracer.StartSpan(ctx, name, o...)
-}
-
-// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
-//
-// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is
-// preferred for cases where the parent is propagated via an incoming request.
-//
-// Returned context contains the newly created span. You can use it to
-// propagate the returned span in process.
-func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) {
- return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...)
-}
-
-// FromContext returns the Span stored in a context, or a Span that is not
-// recording events if there isn't one.
-func FromContext(ctx context.Context) *Span {
- return DefaultTracer.FromContext(ctx)
-}
-
-// NewContext returns a new context with the given Span attached.
-func NewContext(parent context.Context, s *Span) context.Context {
- return DefaultTracer.NewContext(parent, s)
-}
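
DefaultTracer is what makes these package-level helpers pluggable. A short sketch of the intended flow, passing the span implicitly through contexts:

```go
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func parent(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "parent")
	defer span.End()
	child(ctx) // the returned ctx already carries the span
}

func child(ctx context.Context) {
	// FromContext may yield a nil *Span when nothing is attached, but
	// *Span methods are nil-safe, so this works either way.
	span := trace.FromContext(ctx)
	span.Annotate(nil, "reached child")
}

func main() { parent(context.Background()) }
```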
-
-// SpanInterface represents a span of a trace. It has an associated SpanContext, and
-// stores data accumulated while the span is active.
-//
-// Ideally users should interact with Spans by calling the functions in this
-// package that take a Context parameter.
-type SpanInterface interface {
-
- // IsRecordingEvents returns true if events are being recorded for this span.
- // Use this check to avoid computing expensive annotations when they will never
- // be used.
- IsRecordingEvents() bool
-
- // End ends the span.
- End()
-
- // SpanContext returns the SpanContext of the span.
- SpanContext() SpanContext
-
- // SetName sets the name of the span, if it is recording events.
- SetName(name string)
-
- // SetStatus sets the status of the span, if it is recording events.
- SetStatus(status Status)
-
- // AddAttributes sets attributes in the span.
- //
- // Existing attributes whose keys appear in the attributes parameter are overwritten.
- AddAttributes(attributes ...Attribute)
-
- // Annotate adds an annotation with attributes.
- // Attributes can be nil.
- Annotate(attributes []Attribute, str string)
-
- // Annotatef adds an annotation with attributes.
- Annotatef(attributes []Attribute, format string, a ...interface{})
-
- // AddMessageSendEvent adds a message send event to the span.
- //
- // messageID is an identifier for the message, which is recommended to be
- // unique in this span and the same between the send event and the receive
- // event (this allows correlating a message between the sender and receiver).
- // For example, this could be a sequence id.
- AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64)
-
- // AddMessageReceiveEvent adds a message receive event to the span.
- //
- // messageID is an identifier for the message, which is recommended to be
- // unique in this span and the same between the send event and the receive
- // event (this allows correlating a message between the sender and receiver).
- // For example, this could be a sequence id.
- AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64)
-
- // AddLink adds a link to the span.
- AddLink(l Link)
-
- // String prints a string representation of a span.
- String() string
-}
-
-// NewSpan is a convenience function for creating a *Span out of a SpanInterface.
-func NewSpan(s SpanInterface) *Span {
- return &Span{internal: s}
-}
-
-// Span is a struct wrapper around the SpanInterface interface, which allows correctly handling
-// nil spans, while also allowing the SpanInterface implementation to be swapped out.
-type Span struct {
- internal SpanInterface
-}
-
-// Internal returns the underlying implementation of the Span
-func (s *Span) Internal() SpanInterface {
- return s.internal
-}
-
-// IsRecordingEvents returns true if events are being recorded for this span.
-// Use this check to avoid computing expensive annotations when they will never
-// be used.
-func (s *Span) IsRecordingEvents() bool {
- if s == nil {
- return false
- }
- return s.internal.IsRecordingEvents()
-}
-
-// End ends the span.
-func (s *Span) End() {
- if s == nil {
- return
- }
- s.internal.End()
-}
-
-// SpanContext returns the SpanContext of the span.
-func (s *Span) SpanContext() SpanContext {
- if s == nil {
- return SpanContext{}
- }
- return s.internal.SpanContext()
-}
-
-// SetName sets the name of the span, if it is recording events.
-func (s *Span) SetName(name string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.SetName(name)
-}
-
-// SetStatus sets the status of the span, if it is recording events.
-func (s *Span) SetStatus(status Status) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.SetStatus(status)
-}
-
-// AddAttributes sets attributes in the span.
-//
-// Existing attributes whose keys appear in the attributes parameter are overwritten.
-func (s *Span) AddAttributes(attributes ...Attribute) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddAttributes(attributes...)
-}
-
-// Annotate adds an annotation with attributes.
-// Attributes can be nil.
-func (s *Span) Annotate(attributes []Attribute, str string) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.Annotate(attributes, str)
-}
-
-// Annotatef adds an annotation with attributes.
-func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.Annotatef(attributes, format, a...)
-}
-
-// AddMessageSendEvent adds a message send event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows correlating a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize)
-}
-
-// AddMessageReceiveEvent adds a message receive event to the span.
-//
-// messageID is an identifier for the message, which is recommended to be
-// unique in this span and the same between the send event and the receive
-// event (this allows correlating a message between the sender and receiver).
-// For example, this could be a sequence id.
-func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize)
-}
-
-// AddLink adds a link to the span.
-func (s *Span) AddLink(l Link) {
- if !s.IsRecordingEvents() {
- return
- }
- s.internal.AddLink(l)
-}
-
-// String prints a string representation of a span.
-func (s *Span) String() string {
- if s == nil {
- return ""
- }
- return s.internal.String()
-}
diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go
deleted file mode 100644
index b8fc1e495..000000000
--- a/vendor/go.opencensus.io/trace/trace_go11.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.11
-// +build go1.11
-
-package trace
-
-import (
- "context"
- t "runtime/trace"
-)
-
-func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) {
- if !t.IsEnabled() {
- // Avoid additional overhead if
- // runtime/trace is not enabled.
- return ctx, func() {}
- }
- nctx, task := t.NewTask(ctx, name)
- return nctx, task.End
-}
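
startExecutionTracerTask above bridges spans to the runtime execution tracer, paying for a task only when trace collection is active. The same pattern in isolation, with stderr assumed as the trace sink purely for brevity:

```go
package main

import (
	"context"
	"os"
	"runtime/trace"
)

func main() {
	if err := trace.Start(os.Stderr); err != nil { // sink chosen for brevity
		panic(err)
	}
	defer trace.Stop()

	// Each task shows up as a named region in `go tool trace` output.
	ctx, task := trace.NewTask(context.Background(), "handleRequest")
	defer task.End()

	trace.Log(ctx, "stage", "begin")
	// ... traced work would run here ...
}
```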
diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go
deleted file mode 100644
index 2d6c713eb..000000000
--- a/vendor/go.opencensus.io/trace/tracestate/tracestate.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tracestate implements support for the Tracestate header of the
-// W3C TraceContext propagation format.
-package tracestate
-
-import (
- "fmt"
- "regexp"
-)
-
-const (
- keyMaxSize = 256
- valueMaxSize = 256
- maxKeyValuePairs = 32
-)
-
-const (
- keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}`
- keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
- keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
-)
-
-var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`)
-var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`)
-
-// Tracestate represents tracing-system-specific context in a list of key-value pairs. Tracestate allows different
-// vendors to propagate additional information and interoperate with their legacy ID formats.
-type Tracestate struct {
- entries []Entry
-}
-
-// Entry represents one key-value pair in the list of key-value pairs of a Tracestate.
-type Entry struct {
- // Key is an opaque string of up to 256 printable characters. It MUST begin with a lowercase letter,
- // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and
- // forward slashes /.
- Key string
-
- // Value is an opaque string of up to 256 printable ASCII (RFC0020) characters (i.e., the
- // range 0x20 to 0x7E), excluding comma (,) and equals (=).
- Value string
-}
-
-// Entries returns a slice of Entry.
-func (ts *Tracestate) Entries() []Entry {
- if ts == nil {
- return nil
- }
- return ts.entries
-}
-
-func (ts *Tracestate) remove(key string) *Entry {
- for index, entry := range ts.entries {
- if entry.Key == key {
- ts.entries = append(ts.entries[:index], ts.entries[index+1:]...)
- return &entry
- }
- }
- return nil
-}
-
-func (ts *Tracestate) add(entries []Entry) error {
- for _, entry := range entries {
- ts.remove(entry.Key)
- }
- if len(ts.entries)+len(entries) > maxKeyValuePairs {
- return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d",
- len(entries), len(ts.entries), maxKeyValuePairs)
- }
- ts.entries = append(entries, ts.entries...)
- return nil
-}
-
-func isValid(entry Entry) bool {
- return keyValidationRegExp.MatchString(entry.Key) &&
- valueValidationRegExp.MatchString(entry.Value)
-}
-
-func containsDuplicateKey(entries ...Entry) (string, bool) {
- keyMap := make(map[string]int)
- for _, entry := range entries {
- if _, ok := keyMap[entry.Key]; ok {
- return entry.Key, true
- }
- keyMap[entry.Key] = 1
- }
- return "", false
-}
-
-func areEntriesValid(entries ...Entry) (*Entry, bool) {
- for _, entry := range entries {
- if !isValid(entry) {
- return &entry, false
- }
- }
- return nil, true
-}
-
-// New creates a Tracestate object from a parent and/or entries (key-value pairs).
-// Entries from the parent are copied if present. The entries passed to this function
-// are inserted in front of those copied from the parent. If an entry copied from the
-// parent contains the same key as one of the entries in entries, then the entry copied
-// from the parent is removed. See add func.
-//
-// An error is returned with nil Tracestate if
-// 1. one or more entries in entries are invalid.
-// 2. two or more entries in the input entries have the same key.
-// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs.
-// (duplicate entry is counted only once).
-func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) {
- if parent == nil && len(entries) == 0 {
- return nil, nil
- }
- if entry, ok := areEntriesValid(entries...); !ok {
- return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value)
- }
-
- if key, duplicate := containsDuplicateKey(entries...); duplicate {
- return nil, fmt.Errorf("contains duplicate keys (%s)", key)
- }
-
- tracestate := Tracestate{}
-
- if parent != nil && len(parent.entries) > 0 {
- tracestate.entries = append([]Entry{}, parent.entries...)
- }
-
- err := tracestate.add(entries)
- if err != nil {
- return nil, err
- }
- return &tracestate, nil
-}
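
New's copy-and-prepend semantics are easiest to see in use. A sketch with the W3C TraceContext example vendor keys (the values are illustrative):

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	parent, err := tracestate.New(nil,
		tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"})
	if err != nil {
		panic(err)
	}
	// New entries are inserted in front of the parent's; a duplicate key
	// would replace the parent's entry rather than error out.
	ts, err := tracestate.New(parent,
		tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"})
	if err != nil {
		panic(err)
	}
	for _, e := range ts.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value) // rojo first, then congo
	}
}
```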
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
index 296407f38..e65c4907c 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -4,13 +4,14 @@
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
import (
- "google.golang.org/grpc/stats"
+ "context"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
+ "google.golang.org/grpc/stats"
)
// ScopeName is the instrumentation scope name.
@@ -39,6 +40,9 @@ type config struct {
SpanAttributes []attribute.KeyValue
MetricAttributes []attribute.KeyValue
+ PublicEndpoint bool
+ PublicEndpointFn func(ctx context.Context, info *stats.RPCTagInfo) bool
+
ReceivedEvent bool
SentEvent bool
}
@@ -61,6 +65,38 @@ func newConfig(opts []Option) *config {
return c
}
+type publicEndpointOption struct{ p bool }
+
+func (o publicEndpointOption) apply(c *config) {
+ c.PublicEndpoint = o.p
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+ return publicEndpointOption{p: true}
+}
+
+type publicEndpointFnOption struct {
+ fn func(context.Context, *stats.RPCTagInfo) bool
+}
+
+func (o publicEndpointFnOption) apply(c *config) {
+ if o.fn != nil {
+ c.PublicEndpointFn = o.fn
+ }
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
+func WithPublicEndpointFn(fn func(context.Context, *stats.RPCTagInfo) bool) Option {
+ return publicEndpointFnOption{fn: fn}
+}
+
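+// The new options land in TagRPC (see stats_handler.go below): when they
+// fire, the server span becomes a new root that links the remote span
+// context instead of parenting to it. A sketch of wiring them into a
+// server; the "/public.api." method prefix is a made-up trust boundary:
+//
+//	srv := grpc.NewServer(
+//		grpc.StatsHandler(otelgrpc.NewServerHandler(
+//			otelgrpc.WithPublicEndpointFn(func(_ context.Context, info *stats.RPCTagInfo) bool {
+//				return strings.HasPrefix(info.FullMethodName, "/public.api.")
+//			}),
+//		)),
+//	)
+//	// register services, then srv.Serve(lis) as usual
+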
type propagatorsOption struct{ p propagation.TextMapPropagator }
func (o propagatorsOption) apply(c *config) {
@@ -178,6 +214,8 @@ func (o spanStartOption) apply(c *config) {
// WithSpanOptions configures an additional set of
// trace.SpanOptions, which are applied to each new span.
+//
+// Deprecated: It is only used by the deprecated interceptor, and is unused by [NewClientHandler] and [NewServerHandler].
func WithSpanOptions(opts ...trace.SpanStartOption) Option {
return spanStartOption{opts}
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
index f63513d45..99f88ec3b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -4,318 +4,18 @@
package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
// gRPC tracing middleware
-// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
+// https://opentelemetry.io/docs/specs/semconv/rpc/
import (
- "context"
- "errors"
- "io"
"net"
"strconv"
- "google.golang.org/grpc"
- grpc_codes "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/proto"
-
- "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-type messageType attribute.KeyValue
-
-// Event adds an event of the messageType to the span associated with the
-// passed context with a message id.
-func (m messageType) Event(ctx context.Context, id int, _ interface{}) {
- span := trace.SpanFromContext(ctx)
- if !span.IsRecording() {
- return
- }
- span.AddEvent("message", trace.WithAttributes(
- attribute.KeyValue(m),
- semconv.RPCMessageIDKey.Int(id),
- ))
-}
-
-var (
- messageSent = messageType(semconv.RPCMessageTypeSent)
- messageReceived = messageType(semconv.RPCMessageTypeReceived)
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
-// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
-// SendMsg method call.
-type clientStream struct {
- grpc.ClientStream
- desc *grpc.StreamDesc
-
- span trace.Span
-
- receivedEvent bool
- sentEvent bool
-
- receivedMessageID int
- sentMessageID int
-}
-
-var _ = proto.Marshal
-
-func (w *clientStream) RecvMsg(m interface{}) error {
- err := w.ClientStream.RecvMsg(m)
-
- if err == nil && !w.desc.ServerStreams {
- w.endSpan(nil)
- } else if errors.Is(err, io.EOF) {
- w.endSpan(nil)
- } else if err != nil {
- w.endSpan(err)
- } else {
- w.receivedMessageID++
-
- if w.receivedEvent {
- messageReceived.Event(w.Context(), w.receivedMessageID, m)
- }
- }
-
- return err
-}
-
-func (w *clientStream) SendMsg(m interface{}) error {
- err := w.ClientStream.SendMsg(m)
-
- w.sentMessageID++
-
- if w.sentEvent {
- messageSent.Event(w.Context(), w.sentMessageID, m)
- }
-
- if err != nil {
- w.endSpan(err)
- }
-
- return err
-}
-
-func (w *clientStream) Header() (metadata.MD, error) {
- md, err := w.ClientStream.Header()
- if err != nil {
- w.endSpan(err)
- }
-
- return md, err
-}
-
-func (w *clientStream) CloseSend() error {
- err := w.ClientStream.CloseSend()
- if err != nil {
- w.endSpan(err)
- }
-
- return err
-}
-
-func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
- return &clientStream{
- ClientStream: s,
- span: span,
- desc: desc,
- receivedEvent: cfg.ReceivedEvent,
- sentEvent: cfg.SentEvent,
- }
-}
-
-func (w *clientStream) endSpan(err error) {
- if err != nil {
- s, _ := status.FromError(err)
- w.span.SetStatus(codes.Error, s.Message())
- w.span.SetAttributes(statusCodeAttr(s.Code()))
- } else {
- w.span.SetAttributes(statusCodeAttr(grpc_codes.OK))
- }
-
- w.span.End()
-}
-
-// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
-// for use in a grpc.NewClient call.
-//
-// Deprecated: Use [NewClientHandler] instead.
-func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
- cfg := newConfig(opts)
- tracer := cfg.TracerProvider.Tracer(
- ScopeName,
- trace.WithInstrumentationVersion(Version()),
- )
-
- return func(
- ctx context.Context,
- desc *grpc.StreamDesc,
- cc *grpc.ClientConn,
- method string,
- streamer grpc.Streamer,
- callOpts ...grpc.CallOption,
- ) (grpc.ClientStream, error) {
- i := &InterceptorInfo{
- Method: method,
- Type: StreamClient,
- }
- if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
- return streamer(ctx, desc, cc, method, callOpts...)
- }
-
- name, attr := telemetryAttributes(method, cc.Target())
-
- startOpts := append([]trace.SpanStartOption{
- trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attr...),
- },
- cfg.SpanStartOptions...,
- )
-
- ctx, span := tracer.Start(
- ctx,
- name,
- startOpts...,
- )
-
- ctx = inject(ctx, cfg.Propagators)
-
- s, err := streamer(ctx, desc, cc, method, callOpts...)
- if err != nil {
- grpcStatus, _ := status.FromError(err)
- span.SetStatus(codes.Error, grpcStatus.Message())
- span.SetAttributes(statusCodeAttr(grpcStatus.Code()))
- span.End()
- return s, err
- }
- stream := wrapClientStream(s, desc, span, cfg)
- return stream, nil
- }
-}
-
-// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and
-// SendMsg method call.
-type serverStream struct {
- grpc.ServerStream
- ctx context.Context
-
- receivedMessageID int
- sentMessageID int
-
- receivedEvent bool
- sentEvent bool
-}
-
-func (w *serverStream) Context() context.Context {
- return w.ctx
-}
-
-func (w *serverStream) RecvMsg(m interface{}) error {
- err := w.ServerStream.RecvMsg(m)
-
- if err == nil {
- w.receivedMessageID++
- if w.receivedEvent {
- messageReceived.Event(w.Context(), w.receivedMessageID, m)
- }
- }
-
- return err
-}
-
-func (w *serverStream) SendMsg(m interface{}) error {
- err := w.ServerStream.SendMsg(m)
-
- w.sentMessageID++
- if w.sentEvent {
- messageSent.Event(w.Context(), w.sentMessageID, m)
- }
-
- return err
-}
-
-func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream {
- return &serverStream{
- ServerStream: ss,
- ctx: ctx,
- receivedEvent: cfg.ReceivedEvent,
- sentEvent: cfg.SentEvent,
- }
-}
-
-// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
-// for use in a grpc.NewServer call.
-//
-// Deprecated: Use [NewServerHandler] instead.
-func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
- cfg := newConfig(opts)
- tracer := cfg.TracerProvider.Tracer(
- ScopeName,
- trace.WithInstrumentationVersion(Version()),
- )
-
- return func(
- srv interface{},
- ss grpc.ServerStream,
- info *grpc.StreamServerInfo,
- handler grpc.StreamHandler,
- ) error {
- ctx := ss.Context()
- i := &InterceptorInfo{
- StreamServerInfo: info,
- Type: StreamServer,
- }
- if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
- return handler(srv, wrapServerStream(ctx, ss, cfg))
- }
-
- ctx = extract(ctx, cfg.Propagators)
- name, attr := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
-
- startOpts := append([]trace.SpanStartOption{
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attr...),
- },
- cfg.SpanStartOptions...,
- )
-
- ctx, span := tracer.Start(
- trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
- name,
- startOpts...,
- )
- defer span.End()
-
- err := handler(srv, wrapServerStream(ctx, ss, cfg))
- if err != nil {
- s, _ := status.FromError(err)
- statusCode, msg := serverStatus(s)
- span.SetStatus(statusCode, msg)
- span.SetAttributes(statusCodeAttr(s.Code()))
- } else {
- span.SetAttributes(statusCodeAttr(grpc_codes.OK))
- }
-
- return err
- }
-}
-
-// telemetryAttributes returns a span name and span and metric attributes from
-// the gRPC method and peer address.
-func telemetryAttributes(fullMethod, serverAddr string) (string, []attribute.KeyValue) {
- name, methodAttrs := internal.ParseFullMethod(fullMethod)
- srvAttrs := serverAddrAttrs(serverAddr)
-
- attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(srvAttrs))
- attrs = append(attrs, semconv.RPCSystemGRPC)
- attrs = append(attrs, methodAttrs...)
- attrs = append(attrs, srvAttrs...)
- return name, attrs
-}
-
// serverAddrAttrs returns the server address attributes for the hostport.
func serverAddrAttrs(hostport string) []attribute.KeyValue {
h, pStr, err := net.SplitHostPort(hostport)
@@ -333,20 +33,6 @@ func serverAddrAttrs(hostport string) []attribute.KeyValue {
}
}
-// peerFromCtx returns a peer address from a context, if one exists.
-func peerFromCtx(ctx context.Context) string {
- p, ok := peer.FromContext(ctx)
- if !ok {
- return ""
- }
- return p.Addr.String()
-}
-
-// statusCodeAttr returns status code attribute based on given gRPC code.
-func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue {
- return semconv.RPCGRPCStatusCodeKey.Int64(int64(c))
-}
-
// serverStatus returns a span status code and message for a given gRPC
// status code. It maps specific gRPC status codes to a corresponding span
// status code and message. This function is intended for use on the server
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
index 1fa73c2f9..e46185e0b 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
@@ -8,7 +8,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
// ParseFullMethod returns a span name following the OpenTelemetry semantic
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
index 6e67f0216..b427e1724 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -6,15 +6,14 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
import (
"context"
- "google.golang.org/grpc/metadata"
-
"go.opentelemetry.io/otel/baggage"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
+ "google.golang.org/grpc/metadata"
)
type metadataSupplier struct {
- metadata *metadata.MD
+ metadata metadata.MD
}
// assert that metadataSupplier implements the TextMapCarrier interface.
@@ -28,13 +27,13 @@ func (s *metadataSupplier) Get(key string) string {
return values[0]
}
-func (s *metadataSupplier) Set(key string, value string) {
+func (s *metadataSupplier) Set(key, value string) {
s.metadata.Set(key, value)
}
func (s *metadataSupplier) Keys() []string {
- out := make([]string, 0, len(*s.metadata))
- for key := range *s.metadata {
+ out := make([]string, 0, len(s.metadata))
+ for key := range s.metadata {
out = append(out, key)
}
return out
@@ -43,11 +42,12 @@ func (s *metadataSupplier) Keys() []string {
// Inject injects correlation context and span context into the gRPC
// metadata object. This function is meant to be used on outgoing
// requests.
+//
// Deprecated: Unnecessary public func.
func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
c := newConfig(opts)
c.Propagators.Inject(ctx, &metadataSupplier{
- metadata: md,
+ metadata: *md,
})
}
@@ -57,7 +57,7 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont
md = metadata.MD{}
}
propagators.Inject(ctx, &metadataSupplier{
- metadata: &md,
+ metadata: md,
})
return metadata.NewOutgoingContext(ctx, md)
}
@@ -65,11 +65,12 @@ func inject(ctx context.Context, propagators propagation.TextMapPropagator) cont
// Extract returns the correlation context and span context that
// another service encoded in the gRPC metadata object with Inject.
// This function is meant to be used on incoming requests.
+//
// Deprecated: Unnecessary public func.
func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
c := newConfig(opts)
ctx = c.Propagators.Extract(ctx, &metadataSupplier{
- metadata: md,
+ metadata: *md,
})
return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
@@ -82,6 +83,6 @@ func extract(ctx context.Context, propagators propagation.TextMapPropagator) con
}
return propagators.Extract(ctx, &metadataSupplier{
- metadata: &md,
+ metadata: md,
})
}
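
The supplier now holds metadata.MD by value; since MD is a map type, Set and Keys on the copied map header still observe and mutate the caller's metadata, so the earlier pointer indirection was unnecessary. A standalone carrier built the same way, assuming the otel propagation API; the "x-team" header is hypothetical:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
	"google.golang.org/grpc/metadata"
)

// mdCarrier mirrors the by-value supplier: metadata.MD is a map, so the
// copied map header still points at the caller's buckets.
type mdCarrier struct{ md metadata.MD }

func (c mdCarrier) Get(key string) string {
	if vals := c.md.Get(key); len(vals) > 0 {
		return vals[0]
	}
	return ""
}

func (c mdCarrier) Set(key, value string) { c.md.Set(key, value) }

func (c mdCarrier) Keys() []string {
	out := make([]string, 0, len(c.md))
	for k := range c.md {
		out = append(out, k)
	}
	return out
}

var _ propagation.TextMapCarrier = mdCarrier{}

func main() {
	md := metadata.MD{}
	prop := propagation.TraceContext{}
	// Inject writes through the carrier's map header into md itself.
	prop.Inject(context.Background(), mdCarrier{md: md})
	mdCarrier{md: md}.Set("x-team", "payments") // hypothetical header
	fmt.Println(md)
}
```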
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
index 9bec51df3..29d7ab2bd 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -8,18 +8,17 @@ import (
"sync/atomic"
"time"
- grpc_codes "google.golang.org/grpc/codes"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/rpcconv"
"go.opentelemetry.io/otel/trace"
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
)
@@ -38,11 +37,11 @@ type serverHandler struct {
tracer trace.Tracer
- duration metric.Float64Histogram
- inSize metric.Int64Histogram
- outSize metric.Int64Histogram
- inMsg metric.Int64Histogram
- outMsg metric.Int64Histogram
+ duration rpcconv.ServerDuration
+ inSize rpcconv.ServerRequestSize
+ outSize rpcconv.ServerResponseSize
+ inMsg rpcconv.ServerRequestsPerRPC
+ outMsg rpcconv.ServerResponsesPerRPC
}
// NewServerHandler creates a stats.Handler for a gRPC server.
@@ -62,76 +61,41 @@ func NewServerHandler(opts ...Option) stats.Handler {
)
var err error
- h.duration, err = meter.Float64Histogram(
- semconv.RPCServerDurationName,
- metric.WithDescription(semconv.RPCServerDurationDescription),
- metric.WithUnit(semconv.RPCServerDurationUnit),
- )
+ h.duration, err = rpcconv.NewServerDuration(meter)
if err != nil {
otel.Handle(err)
- if h.duration == nil {
- h.duration = noop.Float64Histogram{}
- }
}
- h.inSize, err = meter.Int64Histogram(
- semconv.RPCServerRequestSizeName,
- metric.WithDescription(semconv.RPCServerRequestSizeDescription),
- metric.WithUnit(semconv.RPCServerRequestSizeUnit),
- )
+ h.inSize, err = rpcconv.NewServerRequestSize(meter)
if err != nil {
otel.Handle(err)
- if h.inSize == nil {
- h.inSize = noop.Int64Histogram{}
- }
}
- h.outSize, err = meter.Int64Histogram(
- semconv.RPCServerResponseSizeName,
- metric.WithDescription(semconv.RPCServerResponseSizeDescription),
- metric.WithUnit(semconv.RPCServerResponseSizeUnit),
- )
+ h.outSize, err = rpcconv.NewServerResponseSize(meter)
if err != nil {
otel.Handle(err)
- if h.outSize == nil {
- h.outSize = noop.Int64Histogram{}
- }
}
- h.inMsg, err = meter.Int64Histogram(
- semconv.RPCServerRequestsPerRPCName,
- metric.WithDescription(semconv.RPCServerRequestsPerRPCDescription),
- metric.WithUnit(semconv.RPCServerRequestsPerRPCUnit),
- )
+ h.inMsg, err = rpcconv.NewServerRequestsPerRPC(meter)
if err != nil {
otel.Handle(err)
- if h.inMsg == nil {
- h.inMsg = noop.Int64Histogram{}
- }
}
- h.outMsg, err = meter.Int64Histogram(
- semconv.RPCServerResponsesPerRPCName,
- metric.WithDescription(semconv.RPCServerResponsesPerRPCDescription),
- metric.WithUnit(semconv.RPCServerResponsesPerRPCUnit),
- )
+ h.outMsg, err = rpcconv.NewServerResponsesPerRPC(meter)
if err != nil {
otel.Handle(err)
- if h.outMsg == nil {
- h.outMsg = noop.Int64Histogram{}
- }
}
return h
}
// TagConn can attach some information to the given context.
-func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+func (*serverHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
return ctx
}
// HandleConn processes the Conn stats.
-func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
+func (*serverHandler) HandleConn(context.Context, stats.ConnStats) {
}
// TagRPC can attach some information to the given context.
@@ -147,11 +111,21 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
}
if record {
+ opts := []trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(append(attrs, h.SpanAttributes...)...),
+ }
+ if h.PublicEndpoint || (h.PublicEndpointFn != nil && h.PublicEndpointFn(ctx, info)) {
+ opts = append(opts, trace.WithNewRoot())
+ // Link the incoming span context, if any, for a public endpoint.
+ if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
+ opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
+ }
+ }
ctx, _ = h.tracer.Start(
trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
name,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(append(attrs, h.SpanAttributes...)...),
+ opts...,
)
}
@@ -165,7 +139,16 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
// HandleRPC processes the RPC stats.
func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- h.handleRPC(ctx, rs, h.duration, h.inSize, h.outSize, h.inMsg, h.outMsg, serverStatus)
+ h.handleRPC(
+ ctx,
+ rs,
+ h.duration.Inst(),
+ h.inSize,
+ h.outSize,
+ h.inMsg.Inst(),
+ h.outMsg.Inst(),
+ serverStatus,
+ )
}
type clientHandler struct {
@@ -173,11 +156,11 @@ type clientHandler struct {
tracer trace.Tracer
- duration metric.Float64Histogram
- inSize metric.Int64Histogram
- outSize metric.Int64Histogram
- inMsg metric.Int64Histogram
- outMsg metric.Int64Histogram
+ duration rpcconv.ClientDuration
+ inSize rpcconv.ClientResponseSize
+ outSize rpcconv.ClientRequestSize
+ inMsg rpcconv.ClientResponsesPerRPC
+ outMsg rpcconv.ClientRequestsPerRPC
}
// NewClientHandler creates a stats.Handler for a gRPC client.
@@ -197,64 +180,29 @@ func NewClientHandler(opts ...Option) stats.Handler {
)
var err error
- h.duration, err = meter.Float64Histogram(
- semconv.RPCClientDurationName,
- metric.WithDescription(semconv.RPCClientDurationDescription),
- metric.WithUnit(semconv.RPCClientDurationUnit),
- )
+ h.duration, err = rpcconv.NewClientDuration(meter)
if err != nil {
otel.Handle(err)
- if h.duration == nil {
- h.duration = noop.Float64Histogram{}
- }
}
- h.outSize, err = meter.Int64Histogram(
- semconv.RPCClientRequestSizeName,
- metric.WithDescription(semconv.RPCClientRequestSizeDescription),
- metric.WithUnit(semconv.RPCClientRequestSizeUnit),
- )
+ h.inSize, err = rpcconv.NewClientResponseSize(meter)
if err != nil {
otel.Handle(err)
- if h.outSize == nil {
- h.outSize = noop.Int64Histogram{}
- }
}
- h.inSize, err = meter.Int64Histogram(
- semconv.RPCClientResponseSizeName,
- metric.WithDescription(semconv.RPCClientResponseSizeDescription),
- metric.WithUnit(semconv.RPCClientResponseSizeUnit),
- )
+ h.outSize, err = rpcconv.NewClientRequestSize(meter)
if err != nil {
otel.Handle(err)
- if h.inSize == nil {
- h.inSize = noop.Int64Histogram{}
- }
}
- h.outMsg, err = meter.Int64Histogram(
- semconv.RPCClientRequestsPerRPCName,
- metric.WithDescription(semconv.RPCClientRequestsPerRPCDescription),
- metric.WithUnit(semconv.RPCClientRequestsPerRPCUnit),
- )
+ h.inMsg, err = rpcconv.NewClientResponsesPerRPC(meter)
if err != nil {
otel.Handle(err)
- if h.outMsg == nil {
- h.outMsg = noop.Int64Histogram{}
- }
}
- h.inMsg, err = meter.Int64Histogram(
- semconv.RPCClientResponsesPerRPCName,
- metric.WithDescription(semconv.RPCClientResponsesPerRPCDescription),
- metric.WithUnit(semconv.RPCClientResponsesPerRPCUnit),
- )
+ h.outMsg, err = rpcconv.NewClientRequestsPerRPC(meter)
if err != nil {
otel.Handle(err)
- if h.inMsg == nil {
- h.inMsg = noop.Int64Histogram{}
- }
}
return h
@@ -290,7 +238,13 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont
// HandleRPC processes the RPC stats.
func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
h.handleRPC(
- ctx, rs, h.duration, h.inSize, h.outSize, h.inMsg, h.outMsg,
+ ctx,
+ rs,
+ h.duration.Inst(),
+ h.inSize,
+ h.outSize,
+ h.inMsg.Inst(),
+ h.outMsg.Inst(),
func(s *status.Status) (codes.Code, string) {
return codes.Error, s.Message()
},
@@ -298,20 +252,25 @@ func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
}
// TagConn can attach some information to the given context.
-func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+func (*clientHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
return ctx
}
// HandleConn processes the Conn stats.
-func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
+func (*clientHandler) HandleConn(context.Context, stats.ConnStats) {
// no-op
}
+type int64Hist interface {
+ Record(context.Context, int64, ...attribute.KeyValue)
+}
+
func (c *config) handleRPC(
ctx context.Context,
rs stats.RPCStats,
duration metric.Float64Histogram,
- inSize, outSize, inMsg, outMsg metric.Int64Histogram,
+ inSize, outSize int64Hist,
+ inMsg, outMsg metric.Int64Histogram,
recordStatus func(*status.Status) (codes.Code, string),
) {
gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
@@ -327,7 +286,7 @@ func (c *config) handleRPC(
case *stats.InPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.inMessages, 1)
- inSize.Record(ctx, int64(rs.Length), metric.WithAttributes(gctx.metricAttrs...))
+ inSize.Record(ctx, int64(rs.Length), gctx.metricAttrs...)
}
if c.ReceivedEvent && span.IsRecording() {
@@ -343,7 +302,7 @@ func (c *config) handleRPC(
case *stats.OutPayload:
if gctx != nil {
messageId = atomic.AddInt64(&gctx.outMessages, 1)
- outSize.Record(ctx, int64(rs.Length), metric.WithAttributes(gctx.metricAttrs...))
+ outSize.Record(ctx, int64(rs.Length), gctx.metricAttrs...)
}
if c.SentEvent && span.IsRecording() {
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
index b1feeca49..aa4f4e212 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -5,6 +5,6 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g
// Version is the current release version of the gRPC instrumentation.
func Version() string {
- return "0.61.0"
+ return "0.63.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
deleted file mode 100644
index b25641c55..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
- "strings"
-)
-
-// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
-// Please be careful of initialization order - for example, if you change
-// the global propagator, the DefaultClient might still be using the old one.
-var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
-
-// Get is a convenient replacement for http.Get that adds a span around the request.
-func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil)
- if err != nil {
- return nil, err
- }
- return DefaultClient.Do(req)
-}
-
-// Head is a convenient replacement for http.Head that adds a span around the request.
-func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil)
- if err != nil {
- return nil, err
- }
- return DefaultClient.Do(req)
-}
-
-// Post is a convenient replacement for http.Post that adds a span around the request.
-func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", contentType)
- return DefaultClient.Do(req)
-}
-
-// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
-func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
- return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
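
For reference, the convenience wrappers deleted above mirror the net/http package-level helpers while routing through the instrumented DefaultClient. A minimal sketch of how a caller would have used them, with the target URL as a placeholder:

    package main

    import (
        "context"
        "fmt"
        "io"
        "log"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func main() {
        // Any span already stored in ctx becomes the parent of the client span
        // created by the instrumented transport.
        ctx := context.Background()

        resp, err := otelhttp.Get(ctx, "https://example.com/") // placeholder URL
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        body, err := io.ReadAll(resp.Body)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %d bytes\n", len(body))
    }
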
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
deleted file mode 100644
index a83a02627..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Attribute keys that can be added to a span.
-const (
- ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
- ReadErrorKey = attribute.Key("http.read_error") // if an error occurred while reading a request, the string of the error (io.EOF is not recorded)
- WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
- WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
-)
-
-// Filter is a predicate used to determine whether a given http.Request should
-// be traced. A Filter must return true if the request should be traced.
-type Filter func(*http.Request) bool
-
-func newTracer(tp trace.TracerProvider) trace.Tracer {
- return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version()))
-}
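
The Filter type removed here is the hook that lets callers opt requests out of tracing. A small sketch, assuming a /healthz probe path that should stay untraced (the path, port, and handler names are illustrative):

    package main

    import (
        "log"
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusOK)
        })

        // Returning false excludes the request from tracing entirely.
        skipProbes := func(r *http.Request) bool {
            return r.URL.Path != "/healthz" // assumption: /healthz is the probe path
        }

        handler := otelhttp.NewHandler(mux, "server", otelhttp.WithFilter(skipProbes))
        log.Fatal(http.ListenAndServe(":8080", handler))
    }
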
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
deleted file mode 100644
index 6bd50d4c9..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "net/http"
- "net/http/httptrace"
-
- "go.opentelemetry.io/otel/attribute"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-// ScopeName is the instrumentation scope name.
-const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-// config represents the configuration options available for the http.Handler
-// and http.Transport types.
-type config struct {
- ServerName string
- Tracer trace.Tracer
- Meter metric.Meter
- Propagators propagation.TextMapPropagator
- SpanStartOptions []trace.SpanStartOption
- PublicEndpoint bool
- PublicEndpointFn func(*http.Request) bool
- ReadEvent bool
- WriteEvent bool
- Filters []Filter
- SpanNameFormatter func(string, *http.Request) string
- ClientTrace func(context.Context) *httptrace.ClientTrace
-
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
- MetricAttributesFn func(*http.Request) []attribute.KeyValue
-}
-
-// Option interface used for setting optional config properties.
-type Option interface {
- apply(*config)
-}
-
-type optionFunc func(*config)
-
-func (o optionFunc) apply(c *config) {
- o(c)
-}
-
-// newConfig creates a new config struct and applies opts to it.
-func newConfig(opts ...Option) *config {
- c := &config{
- Propagators: otel.GetTextMapPropagator(),
- MeterProvider: otel.GetMeterProvider(),
- }
- for _, opt := range opts {
- opt.apply(c)
- }
-
- // Tracer is only initialized if manually specified. Otherwise, it is derived per request from the span in the incoming context.
- if c.TracerProvider != nil {
- c.Tracer = newTracer(c.TracerProvider)
- }
-
- c.Meter = c.MeterProvider.Meter(
- ScopeName,
- metric.WithInstrumentationVersion(Version()),
- )
-
- return c
-}
-
-// WithTracerProvider specifies a tracer provider to use for creating a tracer.
-// If none is specified, the global provider is used.
-func WithTracerProvider(provider trace.TracerProvider) Option {
- return optionFunc(func(cfg *config) {
- if provider != nil {
- cfg.TracerProvider = provider
- }
- })
-}
-
-// WithMeterProvider specifies a meter provider to use for creating a meter.
-// If none is specified, the global provider is used.
-func WithMeterProvider(provider metric.MeterProvider) Option {
- return optionFunc(func(cfg *config) {
- if provider != nil {
- cfg.MeterProvider = provider
- }
- })
-}
-
-// WithPublicEndpoint configures the Handler to link the span with an incoming
-// span context. If this option is not provided, then the association is a child
-// association instead of a link.
-func WithPublicEndpoint() Option {
- return optionFunc(func(c *config) {
- c.PublicEndpoint = true
- })
-}
-
-// WithPublicEndpointFn runs with every request, and allows conditionally
-// configuring the Handler to link the span with an incoming span context. If
-// this option is not provided or returns false, then the association is a
-// child association instead of a link.
-// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
-func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
- return optionFunc(func(c *config) {
- c.PublicEndpointFn = fn
- })
-}
-
-// WithPropagators configures specific propagators. If this
-// option isn't specified, then the global TextMapPropagator is used.
-func WithPropagators(ps propagation.TextMapPropagator) Option {
- return optionFunc(func(c *config) {
- if ps != nil {
- c.Propagators = ps
- }
- })
-}
-
-// WithSpanOptions configures an additional set of
-// trace.SpanStartOption values, which are applied to each new span.
-func WithSpanOptions(opts ...trace.SpanStartOption) Option {
- return optionFunc(func(c *config) {
- c.SpanStartOptions = append(c.SpanStartOptions, opts...)
- })
-}
-
-// WithFilter adds a filter to the list of filters used by the handler.
-// If any filter indicates to exclude a request then the request will not be
-// traced. All filters must allow a request to be traced for a Span to be created.
-// If no filters are provided then all requests are traced.
-// Filters are invoked for every processed request, so it is advisable to keep
-// them simple and fast.
-func WithFilter(f Filter) Option {
- return optionFunc(func(c *config) {
- c.Filters = append(c.Filters, f)
- })
-}
-
-type event int
-
-// Different types of events that can be recorded; see WithMessageEvents.
-const (
- ReadEvents event = iota
- WriteEvents
-)
-
-// WithMessageEvents configures the Handler to record the specified events
-// (span.AddEvent) on spans. By default only summary attributes are added at the
-// end of the request.
-//
-// Valid events are:
-// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
-// using the ReadBytesKey
-//   - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
-//     using the WroteBytesKey
-func WithMessageEvents(events ...event) Option {
- return optionFunc(func(c *config) {
- for _, e := range events {
- switch e {
- case ReadEvents:
- c.ReadEvent = true
- case WriteEvents:
- c.WriteEvent = true
- }
- }
- })
-}
-
-// WithSpanNameFormatter takes a function that will be called on every
-// request and the returned string will become the Span Name.
-//
-// When using [http.ServeMux] (or any middleware that sets the Pattern of [http.Request]),
-// the span name formatter will run twice: once when the span is created, and a
-// second time after the handler returns, so the pattern can be used.
-func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
- return optionFunc(func(c *config) {
- c.SpanNameFormatter = f
- })
-}
-
-// WithClientTrace takes a function that returns client trace instance that will be
-// applied to the requests sent through the otelhttp Transport.
-func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
- return optionFunc(func(c *config) {
- c.ClientTrace = f
- })
-}
-
-// WithServerName returns an Option that sets the name of the (virtual) server
-// handling requests.
-func WithServerName(server string) Option {
- return optionFunc(func(c *config) {
- c.ServerName = server
- })
-}
-
-// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
-// These attributes will be included in metrics for every request.
-func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
- return optionFunc(func(c *config) {
- c.MetricAttributesFn = metricAttributesFn
- })
-}
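
To illustrate how the deleted options compose: each Option mutates the shared config inside newConfig, so they can be stacked freely on a handler. A runnable sketch assuming a public-facing API server (route, operation name, and port are illustrative; r.Pattern requires a routing layer that sets it, such as the Go 1.22+ http.ServeMux):

    package main

    import (
        "log"
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("GET /orders/{id}", func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusOK)
        })

        handler := otelhttp.NewHandler(mux, "http.server",
            // Link, rather than parent, any incoming remote span context.
            otelhttp.WithPublicEndpoint(),
            // Name spans after the matched route pattern when one is available.
            otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
                if r.Pattern != "" {
                    return operation + " " + r.Pattern
                }
                return operation
            }),
            // Record read/write message events on each span.
            otelhttp.WithMessageEvents(otelhttp.ReadEvents, otelhttp.WriteEvents),
        )
        log.Fatal(http.ListenAndServe(":8080", handler))
    }
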
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
deleted file mode 100644
index 56b24b982..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package otelhttp provides an http.Handler and functions that are intended
-// to be used to add tracing by wrapping existing handlers (with NewHandler) and
-// routes (with WithRouteTag).
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
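
Beyond the server-side wrappers, the package's client-side entry point is NewTransport, which the DefaultClient shown earlier is built on. A minimal sketch of instrumenting an existing client (the helper name is illustrative):

    package main

    import (
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    // newInstrumentedClient wraps the standard transport so every request made
    // through the returned client gets a client span and propagation headers.
    func newInstrumentedClient() *http.Client {
        return &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}
    }
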
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
deleted file mode 100644
index 937f9b4e7..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "net/http"
- "time"
-
- "github.com/felixge/httpsnoop"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-// middleware is an http middleware which wraps the next handler in a span.
-type middleware struct {
- operation string
- server string
-
- tracer trace.Tracer
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- readEvent bool
- writeEvent bool
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- publicEndpoint bool
- publicEndpointFn func(*http.Request) bool
- metricAttributesFn func(*http.Request) []attribute.KeyValue
-
- semconv semconv.HTTPServer
-}
-
-func defaultHandlerFormatter(operation string, _ *http.Request) string {
- return operation
-}
-
-// NewHandler wraps the passed handler in a span named after the operation and
-// enriches it with metrics.
-func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
- return NewMiddleware(operation, opts...)(handler)
-}
-
-// NewMiddleware returns a tracing and metrics instrumentation middleware.
-// The handler returned by the middleware wraps a handler
-// in a span named after the operation and enriches it with metrics.
-func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
- h := middleware{
- operation: operation,
- }
-
- defaultOpts := []Option{
- WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
- WithSpanNameFormatter(defaultHandlerFormatter),
- }
-
- c := newConfig(append(defaultOpts, opts...)...)
- h.configure(c)
-
- return func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- h.serveHTTP(w, r, next)
- })
- }
-}
-
-func (h *middleware) configure(c *config) {
- h.tracer = c.Tracer
- h.propagators = c.Propagators
- h.spanStartOptions = c.SpanStartOptions
- h.readEvent = c.ReadEvent
- h.writeEvent = c.WriteEvent
- h.filters = c.Filters
- h.spanNameFormatter = c.SpanNameFormatter
- h.publicEndpoint = c.PublicEndpoint
- h.publicEndpointFn = c.PublicEndpointFn
- h.server = c.ServerName
- h.semconv = semconv.NewHTTPServer(c.Meter)
- h.metricAttributesFn = c.MetricAttributesFn
-}
-
-// serveHTTP sets up tracing and calls the given next http.Handler with the span
-// context injected into the request context.
-func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
- requestStartTime := time.Now()
- for _, f := range h.filters {
- if !f(r) {
- // Simply pass through to the handler if a filter rejects the request
- next.ServeHTTP(w, r)
- return
- }
- }
-
- ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
- opts := []trace.SpanStartOption{
- trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r, semconv.RequestTraceAttrsOpts{})...),
- }
-
- opts = append(opts, h.spanStartOptions...)
- if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
- opts = append(opts, trace.WithNewRoot())
- // Link the incoming span context, if any, for a public endpoint.
- if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
- opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
- }
- }
-
- tracer := h.tracer
-
- if tracer == nil {
- if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
- tracer = newTracer(span.TracerProvider())
- } else {
- tracer = newTracer(otel.GetTracerProvider())
- }
- }
-
- if startTime := StartTimeFromContext(ctx); !startTime.IsZero() {
- opts = append(opts, trace.WithTimestamp(startTime))
- requestStartTime = startTime
- }
-
- ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
- defer span.End()
-
- readRecordFunc := func(int64) {}
- if h.readEvent {
- readRecordFunc = func(n int64) {
- span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
- }
- }
-
- // If the request body is nil or NoBody, leave it untouched: replacing it
- // would change the body's identity in ways callers can observe, since they
- // may compare it against those sentinel values or assert its concrete type.
- bw := request.NewBodyWrapper(r.Body, readRecordFunc)
- if r.Body != nil && r.Body != http.NoBody {
- r.Body = bw
- }
-
- writeRecordFunc := func(int64) {}
- if h.writeEvent {
- writeRecordFunc = func(n int64) {
- span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
- }
- }
-
- rww := request.NewRespWriterWrapper(w, writeRecordFunc)
-
- // Wrap w to use our ResponseWriter methods while also exposing
- // other interfaces that w may implement (http.CloseNotifier,
- // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
-
- w = httpsnoop.Wrap(w, httpsnoop.Hooks{
- Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
- return rww.Header
- },
- Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
- return rww.Write
- },
- WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
- return rww.WriteHeader
- },
- Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc {
- return rww.Flush
- },
- })
-
- labeler, found := LabelerFromContext(ctx)
- if !found {
- ctx = ContextWithLabeler(ctx, labeler)
- }
-
- r = r.WithContext(ctx)
- next.ServeHTTP(w, r)
-
- if r.Pattern != "" {
- span.SetName(h.spanNameFormatter(h.operation, r))
- }
-
- statusCode := rww.StatusCode()
- bytesWritten := rww.BytesWritten()
- span.SetStatus(h.semconv.Status(statusCode))
- span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
- StatusCode: statusCode,
- ReadBytes: bw.BytesRead(),
- ReadError: bw.Error(),
- WriteBytes: bytesWritten,
- WriteError: rww.Error(),
- })...)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
-
- metricAttributes := semconv.MetricAttributes{
- Req: r,
- StatusCode: statusCode,
- AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...),
- }
-
- h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
- ServerName: h.server,
- ResponseSize: bytesWritten,
- MetricAttributes: metricAttributes,
- MetricData: semconv.MetricData{
- RequestSize: bw.BytesRead(),
- ElapsedTime: elapsedTime,
- },
- })
-}
-
-func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
- var attributeForRequest []attribute.KeyValue
- if h.metricAttributesFn != nil {
- attributeForRequest = h.metricAttributesFn(r)
- }
- return attributeForRequest
-}
-
-// WithRouteTag annotates spans and metrics with the provided route name
-// with HTTP route attribute.
-func WithRouteTag(route string, h http.Handler) http.Handler {
- attr := semconv.NewHTTPServer(nil).Route(route)
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- span := trace.SpanFromContext(r.Context())
- span.SetAttributes(attr)
-
- labeler, _ := LabelerFromContext(r.Context())
- labeler.Add(attr)
-
- h.ServeHTTP(w, r)
- })
-}
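
Tying the deleted middleware together: NewHandler (or NewMiddleware) wraps a handler tree in one server span per request, and WithRouteTag annotates both the span and the metric labels with the route. A runnable sketch using only the APIs shown above (route and port are illustrative):

    package main

    import (
        "log"
        "net/http"

        "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
    )

    func main() {
        mux := http.NewServeMux()
        mux.Handle("/hello", otelhttp.WithRouteTag("/hello",
            http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                _, _ = w.Write([]byte("hello"))
            })))

        // One server span per request, named "server" unless a formatter is set.
        log.Fatal(http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "server")))
    }
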
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
deleted file mode 100644
index d032aa841..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/request/body_wrapper.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package request provides types and functionality for HTTP request
-// handling.
-package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
-
-import (
- "io"
- "sync"
-)
-
-var _ io.ReadCloser = &BodyWrapper{}
-
-// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
-// of bytes read and the last error.
-type BodyWrapper struct {
- io.ReadCloser
- OnRead func(n int64) // must not be nil
-
- mu sync.Mutex
- read int64
- err error
-}
-
-// NewBodyWrapper creates a new BodyWrapper.
-//
-// The onRead attribute is a callback that will be called every time the data
-// is read, with the number of bytes being read.
-func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
- return &BodyWrapper{
- ReadCloser: body,
- OnRead: onRead,
- }
-}
-
-// Read reads the data from the io.ReadCloser, and stores the number of bytes
-// read and the error.
-func (w *BodyWrapper) Read(b []byte) (int, error) {
- n, err := w.ReadCloser.Read(b)
- n1 := int64(n)
-
- w.updateReadData(n1, err)
- w.OnRead(n1)
- return n, err
-}
-
-func (w *BodyWrapper) updateReadData(n int64, err error) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.read += n
- if err != nil {
- w.err = err
- }
-}
-
-// Close closes the io.ReadCloser.
-func (w *BodyWrapper) Close() error {
- return w.ReadCloser.Close()
-}
-
-// BytesRead returns the number of bytes read up to this point.
-func (w *BodyWrapper) BytesRead() int64 {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- return w.read
-}
-
-// Error returns the last error.
-func (w *BodyWrapper) Error() error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- return w.err
-}
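
The pattern behind the deleted BodyWrapper is easy to reproduce outside this internal package: embed the io.ReadCloser and report every Read through a callback. A standalone mirror of that idea (without the mutex, so unlike BodyWrapper it is not safe for concurrent reads):

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // countingBody mirrors the BodyWrapper pattern above: it wraps an
    // io.ReadCloser and reports each Read through a callback.
    type countingBody struct {
        io.ReadCloser
        onRead func(n int64)
    }

    func (c *countingBody) Read(p []byte) (int, error) {
        n, err := c.ReadCloser.Read(p)
        c.onRead(int64(n))
        return n, err
    }

    func main() {
        var total int64
        body := &countingBody{
            ReadCloser: io.NopCloser(strings.NewReader("hello world")),
            onRead:     func(n int64) { total += n },
        }
        _, _ = io.ReadAll(body)
        fmt.Println(total) // 11
    }
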
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
deleted file mode 100644
index 9e00dd2fc..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
-
-// Generate request package:
-//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper.go.tmpl "--data={}" --out=body_wrapper.go
-//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper_test.go.tmpl "--data={}" --out=body_wrapper_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper.go.tmpl "--data={}" --out=resp_writer_wrapper.go
-//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper_test.go.tmpl "--data={}" --out=resp_writer_wrapper_test.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
deleted file mode 100644
index ca2e4c14c..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/request/resp_writer_wrapper.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
-
-import (
- "net/http"
- "sync"
-)
-
-var _ http.ResponseWriter = &RespWriterWrapper{}
-
-// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
-// bytes written, the last error, and to catch the first written statusCode.
-// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
-// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
-// that may be useful when using it in real life situations.
-type RespWriterWrapper struct {
- http.ResponseWriter
- OnWrite func(n int64) // must not be nil
-
- mu sync.RWMutex
- written int64
- statusCode int
- err error
- wroteHeader bool
-}
-
-// NewRespWriterWrapper creates a new RespWriterWrapper.
-//
-// The onWrite attribute is a callback that will be called every time the data
-// is written, with the number of bytes that were written.
-func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
- return &RespWriterWrapper{
- ResponseWriter: w,
- OnWrite: onWrite,
- statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
- }
-}
-
-// Write writes the bytes array into the [ResponseWriter], and tracks the
-// number of bytes written and last error.
-func (w *RespWriterWrapper) Write(p []byte) (int, error) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if !w.wroteHeader {
- w.writeHeader(http.StatusOK)
- }
-
- n, err := w.ResponseWriter.Write(p)
- n1 := int64(n)
- w.OnWrite(n1)
- w.written += n1
- w.err = err
- return n, err
-}
-
-// WriteHeader persists initial statusCode for span attribution.
-// All calls to WriteHeader will be propagated to the underlying ResponseWriter
-// and will persist the statusCode from the first call.
-// Blocking consecutive calls to WriteHeader would alter the expected behavior and
-// would suppress the net/http warning logs that alert developers to incorrect handler implementations.
-func (w *RespWriterWrapper) WriteHeader(statusCode int) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- w.writeHeader(statusCode)
-}
-
-// writeHeader persists the status code for span attribution, and propagates
-// the call to the underlying ResponseWriter.
-// It does not acquire a lock, and therefore assumes that is being handled by a
-// parent method.
-func (w *RespWriterWrapper) writeHeader(statusCode int) {
- if !w.wroteHeader {
- w.wroteHeader = true
- w.statusCode = statusCode
- }
- w.ResponseWriter.WriteHeader(statusCode)
-}
-
-// Flush implements [http.Flusher].
-func (w *RespWriterWrapper) Flush() {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if !w.wroteHeader {
- w.writeHeader(http.StatusOK)
- }
-
- if f, ok := w.ResponseWriter.(http.Flusher); ok {
- f.Flush()
- }
-}
-
-// BytesWritten returns the number of bytes written.
-func (w *RespWriterWrapper) BytesWritten() int64 {
- w.mu.RLock()
- defer w.mu.RUnlock()
-
- return w.written
-}
-
-// StatusCode returns the HTTP status code that was sent.
-func (w *RespWriterWrapper) StatusCode() int {
- w.mu.RLock()
- defer w.mu.RUnlock()
-
- return w.statusCode
-}
-
-// Error returns the last error.
-func (w *RespWriterWrapper) Error() error {
- w.mu.RLock()
- defer w.mu.RUnlock()
-
- return w.err
-}
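
Likewise, the core idea of the deleted RespWriterWrapper (remember the first status code written while still delegating every call) can be shown with a minimal standalone mirror:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    // statusRecorder mirrors RespWriterWrapper's status handling: it keeps the
    // first status code written and passes every call through unchanged.
    type statusRecorder struct {
        http.ResponseWriter
        status      int
        wroteHeader bool
    }

    func (s *statusRecorder) WriteHeader(code int) {
        if !s.wroteHeader {
            s.wroteHeader = true
            s.status = code
        }
        s.ResponseWriter.WriteHeader(code)
    }

    func main() {
        rec := httptest.NewRecorder()
        w := &statusRecorder{ResponseWriter: rec, status: http.StatusOK}

        http.NotFound(w, httptest.NewRequest(http.MethodGet, "/missing", nil))
        fmt.Println(w.status) // 404
    }
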
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
deleted file mode 100644
index 7cb9693d9..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconv/env.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-
-import (
- "context"
- "fmt"
- "net/http"
- "os"
- "strings"
- "sync"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/metric"
-)
-
-// OTelSemConvStabilityOptIn is the name of an environment variable that can be set
-// to "http/dup" to keep emitting the old HTTP semantic conventions alongside the new ones.
-const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN"
-
-type ResponseTelemetry struct {
- StatusCode int
- ReadBytes int64
- ReadError error
- WriteBytes int64
- WriteError error
-}
-
-type HTTPServer struct {
- duplicate bool
-
- // Old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- serverLatencyMeasure metric.Float64Histogram
-
- // New metrics
- requestBodySizeHistogram metric.Int64Histogram
- responseBodySizeHistogram metric.Int64Histogram
- requestDurationHistogram metric.Float64Histogram
-}
-
-// RequestTraceAttrs returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts)
- if s.duplicate {
- return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs)
- }
- return attrs
-}
-
-func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue {
- if s.duplicate {
- return []attribute.KeyValue{
- OldHTTPServer{}.NetworkTransportAttr(network),
- CurrentHTTPServer{}.NetworkTransportAttr(network),
- }
- }
- return []attribute.KeyValue{
- CurrentHTTPServer{}.NetworkTransportAttr(network),
- }
-}
-
-// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
-//
-// If any of the fields in the ResponseTelemetry are not set, the corresponding attribute is omitted.
-func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
- attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp)
- if s.duplicate {
- return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
-}
-
-// Route returns the attribute for the route.
-func (s HTTPServer) Route(route string) attribute.KeyValue {
- return CurrentHTTPServer{}.Route(route)
-}
-
-// Status returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (s HTTPServer) Status(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 500 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
-
-type ServerMetricData struct {
- ServerName string
- ResponseSize int64
-
- MetricData
- MetricAttributes
-}
-
-type MetricAttributes struct {
- Req *http.Request
- StatusCode int
- AdditionalAttributes []attribute.KeyValue
-}
-
-type MetricData struct {
- RequestSize int64
-
- // The request duration, in milliseconds
- ElapsedTime float64
-}
-
-var (
- metricAddOptionPool = &sync.Pool{
- New: func() interface{} {
- return &[]metric.AddOption{}
- },
- }
-
- metricRecordOptionPool = &sync.Pool{
- New: func() interface{} {
- return &[]metric.RecordOption{}
- },
- }
-)
-
-func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
- if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil {
- attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption)
- *recordOpts = append(*recordOpts, o)
- s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...)
- s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...)
- s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o)
- *recordOpts = (*recordOpts)[:0]
- metricRecordOptionPool.Put(recordOpts)
- }
-
- if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil {
- attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
- o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- addOpts := metricAddOptionPool.Get().(*[]metric.AddOption)
- *addOpts = append(*addOpts, o)
- s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...)
- s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...)
- s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
- *addOpts = (*addOpts)[:0]
- metricAddOptionPool.Put(addOpts)
- }
-}
-
-// hasOptIn returns true if the comma-separated version string contains the
-// exact optIn value.
-func hasOptIn(version, optIn string) bool {
- for _, v := range strings.Split(version, ",") {
- if strings.TrimSpace(v) == optIn {
- return true
- }
- }
- return false
-}
-
-func NewHTTPServer(meter metric.Meter) HTTPServer {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- server := HTTPServer{
- duplicate: duplicate,
- }
- server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter)
- if duplicate {
- server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter)
- }
- return server
-}
-
-type HTTPClient struct {
- duplicate bool
-
- // old metrics
- requestBytesCounter metric.Int64Counter
- responseBytesCounter metric.Int64Counter
- latencyMeasure metric.Float64Histogram
-
- // new metrics
- requestBodySize metric.Int64Histogram
- requestDuration metric.Float64Histogram
-}
-
-func NewHTTPClient(meter metric.Meter) HTTPClient {
- env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn))
- duplicate := hasOptIn(env, "http/dup")
- client := HTTPClient{
- duplicate: duplicate,
- }
- client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter)
- if duplicate {
- client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter)
- }
-
- return client
-}
-
-// RequestTraceAttrs returns attributes for an HTTP request made by a client.
-func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.RequestTraceAttrs(req)
- if c.duplicate {
- return OldHTTPClient{}.RequestTraceAttrs(req, attrs)
- }
- return attrs
-}
-
-// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
-func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp)
- if c.duplicate {
- return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs)
- }
- return attrs
-}
-
-func (c HTTPClient) Status(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 400 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
-
-func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
- return CurrentHTTPClient{}.ErrorType(err)
-}
-
-type MetricOpts struct {
- measurement metric.MeasurementOption
- addOptions metric.AddOption
-}
-
-func (o MetricOpts) MeasurementOption() metric.MeasurementOption {
- return o.measurement
-}
-
-func (o MetricOpts) AddOptions() metric.AddOption {
- return o.addOptions
-}
-
-func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts {
- opts := map[string]MetricOpts{}
-
- attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
- set := metric.WithAttributeSet(attribute.NewSet(attributes...))
- opts["new"] = MetricOpts{
- measurement: set,
- addOptions: set,
- }
-
- if c.duplicate {
- attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
- set := metric.WithAttributeSet(attribute.NewSet(attributes...))
- opts["old"] = MetricOpts{
- measurement: set,
- addOptions: set,
- }
- }
-
- return opts
-}
-
-func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) {
- if s.requestBodySize == nil || s.requestDuration == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption())
- s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption())
-
- if s.duplicate {
- s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions())
- s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption())
- }
-}
-
-func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) {
- if s.responseBytesCounter == nil {
- // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
- return
- }
-
- s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions())
-}
-
-func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue {
- attrs := CurrentHTTPClient{}.TraceAttributes(host)
- if s.duplicate {
- return OldHTTPClient{}.TraceAttributes(host, attrs)
- }
-
- return attrs
-}
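
The duplication switch above hinges on exact matching within the comma-separated OTEL_SEMCONV_STABILITY_OPT_IN value. A standalone reproduction of the hasOptIn helper showing that behavior:

    package main

    import (
        "fmt"
        "strings"
    )

    // hasOptIn mirrors the helper above: it reports whether the comma-separated
    // opt-in list contains the exact value, ignoring surrounding whitespace.
    func hasOptIn(list, optIn string) bool {
        for _, v := range strings.Split(list, ",") {
            if strings.TrimSpace(v) == optIn {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(hasOptIn("http/dup, database/dup", "http/dup")) // true
        fmt.Println(hasOptIn("http/duplicate", "http/dup"))         // false: exact match only
    }
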
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
deleted file mode 100644
index f2cf8a152..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-
-// Generate semconv package:
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
deleted file mode 100644
index 53976b0d5..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
+++ /dev/null
@@ -1,573 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconv/httpconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv provides OpenTelemetry semantic convention types and
-// functionality.
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "slices"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-type RequestTraceAttrsOpts struct {
- // If set, this is used as value for the "http.client_ip" attribute.
- HTTPClientIP string
-}
-
-type CurrentHTTPServer struct{}
-
-// RequestTraceAttrs returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue {
- count := 3 // ServerAddress, Method, Scheme
-
- var host string
- var p int
- if server == "" {
- host, p = SplitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = SplitHostPort(server)
- if p < 0 {
- _, p = SplitHostPort(req.Host)
- }
- }
-
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- count++
- }
-
- method, methodOriginal := n.method(req.Method)
- if methodOriginal != (attribute.KeyValue{}) {
- count++
- }
-
- scheme := n.scheme(req.TLS != nil)
-
- peer, peerPort := SplitHostPort(req.RemoteAddr)
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- count++
- if peerPort > 0 {
- count++
- }
- }
-
- useragent := req.UserAgent()
- if useragent != "" {
- count++
- }
-
- // For client IP, use, in order:
- // 1. The value passed in the options
- // 2. The value in the X-Forwarded-For header
- // 3. The peer address
- clientIP := opts.HTTPClientIP
- if clientIP == "" {
- clientIP = serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP == "" {
- clientIP = peer
- }
- }
- if clientIP != "" {
- count++
- }
-
- if req.URL != nil && req.URL.Path != "" {
- count++
- }
-
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" && protoName != "http" {
- count++
- }
- if protoVersion != "" {
- count++
- }
-
- route := httpRoute(req.Pattern)
- if route != "" {
- count++
- }
-
- attrs := make([]attribute.KeyValue, 0, count)
- attrs = append(attrs,
- semconvNew.ServerAddress(host),
- method,
- scheme,
- )
-
- if hostPort > 0 {
- attrs = append(attrs, semconvNew.ServerPort(hostPort))
- }
- if methodOriginal != (attribute.KeyValue{}) {
- attrs = append(attrs, methodOriginal)
- }
-
- if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
- if peerPort > 0 {
- attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, semconvNew.UserAgentOriginal(useragent))
- }
-
- if clientIP != "" {
- attrs = append(attrs, semconvNew.ClientAddress(clientIP))
- }
-
- if req.URL != nil && req.URL.Path != "" {
- attrs = append(attrs, semconvNew.URLPath(req.URL.Path))
- }
-
- if protoName != "" && protoName != "http" {
- attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
- }
-
- if route != "" {
- attrs = append(attrs, n.Route(route))
- }
-
- return attrs
-}
-
-func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return semconvNew.NetworkTransportTCP
- case "udp", "udp4", "udp6":
- return semconvNew.NetworkTransportUDP
- case "unix", "unixgram", "unixpacket":
- return semconvNew.NetworkTransportUnix
- default:
- return semconvNew.NetworkTransportPipe
- }
-}
-
-func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
- if method == "" {
- return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
- }
- if attr, ok := methodLookup[method]; ok {
- return attr, attribute.KeyValue{}
- }
-
- orig := semconvNew.HTTPRequestMethodOriginal(method)
- if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
- return attr, orig
- }
- return semconvNew.HTTPRequestMethodGet, orig
-}
-
-func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return semconvNew.URLScheme("https")
- }
- return semconvNew.URLScheme("http")
-}
-
-// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP
-// response.
-//
-// If any of the fields in the ResponseTelemetry are not set, the corresponding
-// attribute is omitted.
-func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
- var count int
-
- if resp.ReadBytes > 0 {
- count++
- }
- if resp.WriteBytes > 0 {
- count++
- }
- if resp.StatusCode > 0 {
- count++
- }
-
- attributes := make([]attribute.KeyValue, 0, count)
-
- if resp.ReadBytes > 0 {
- attributes = append(attributes,
- semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)),
- )
- }
- if resp.WriteBytes > 0 {
- attributes = append(attributes,
- semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)),
- )
- }
- if resp.StatusCode > 0 {
- attributes = append(attributes,
- semconvNew.HTTPResponseStatusCode(resp.StatusCode),
- )
- }
-
- return attributes
-}
-
-// Route returns the attribute for the route.
-func (n CurrentHTTPServer) Route(route string) attribute.KeyValue {
- return semconvNew.HTTPRoute(route)
-}
-
-func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription),
- )
- handleErr(err)
-
- responseBodySizeHistogram, err := meter.Int64Histogram(
- semconvNew.HTTPServerResponseBodySizeName,
- metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription),
- )
- handleErr(err)
- requestDurationHistogram, err := meter.Float64Histogram(
- semconvNew.HTTPServerRequestDurationName,
- metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram
-}
-
-func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- num := len(additionalAttributes) + 3
- var host string
- var p int
- if server == "" {
- host, p = SplitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = SplitHostPort(server)
- if p < 0 {
- _, p = SplitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- num++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- num++
- }
- if protoVersion != "" {
- num++
- }
-
- if statusCode > 0 {
- num++
- }
-
- attributes := slices.Grow(additionalAttributes, num)
- attributes = append(attributes,
- semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
- n.scheme(req.TLS != nil),
- semconvNew.ServerAddress(host))
-
- if hostPort > 0 {
- attributes = append(attributes, semconvNew.ServerPort(hostPort))
- }
- if protoName != "" {
- attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
- }
- if protoVersion != "" {
- attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
- }
- return attributes
-}
-
-type CurrentHTTPClient struct{}
-
-// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
-func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
- /*
- the following attributes are returned:
- - http.request.method
- - http.request.method.original
- - url.full
- - server.address
- - server.port
- - network.protocol.name
- - network.protocol.version
- */
- numOfAttributes := 3 // URL, server address, and method.
-
- var urlHost string
- if req.URL != nil {
- urlHost = req.URL.Host
- }
- var requestHost string
- var requestPort int
- for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
- requestHost, requestPort = SplitHostPort(hostport)
- if requestHost != "" || requestPort > 0 {
- break
- }
- }
-
- eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
- if eligiblePort > 0 {
- numOfAttributes++
- }
-
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" && protoName != "http" {
- numOfAttributes++
- }
- if protoVersion != "" {
- numOfAttributes++
- }
-
- method, originalMethod := n.method(req.Method)
- if originalMethod != (attribute.KeyValue{}) {
- numOfAttributes++
- }
-
- attrs := make([]attribute.KeyValue, 0, numOfAttributes)
-
- attrs = append(attrs, method)
- if originalMethod != (attribute.KeyValue{}) {
- attrs = append(attrs, originalMethod)
- }
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, semconvNew.URLFull(u))
-
- attrs = append(attrs, semconvNew.ServerAddress(requestHost))
- if eligiblePort > 0 {
- attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
- }
-
- if protoName != "" && protoName != "http" {
- attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
- }
-
- return attrs
-}
-
-// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
-func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
-	/*
-	The following attributes are returned:
-	- http.response.status_code
-	- error.type
-	*/
- var count int
- if resp.StatusCode > 0 {
- count++
- }
-
- if isErrorStatusCode(resp.StatusCode) {
- count++
- }
-
- attrs := make([]attribute.KeyValue, 0, count)
- if resp.StatusCode > 0 {
- attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
- }
-
- if isErrorStatusCode(resp.StatusCode) {
- errorType := strconv.Itoa(resp.StatusCode)
- attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
- }
- return attrs
-}
-
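
Reading ResponseTraceAttrs concretely: a 2xx response yields only http.response.status_code, while a 4xx or 5xx response additionally yields error.type carrying the status code as a string. A small sketch of that mapping (attribute rendering simplified to plain strings):

package main

import (
	"fmt"
	"strconv"
)

func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 }

func main() {
	for _, code := range []int{200, 404} {
		attrs := []string{"http.response.status_code=" + strconv.Itoa(code)}
		if isErrorStatusCode(code) {
			attrs = append(attrs, "error.type="+strconv.Itoa(code))
		}
		fmt.Println(code, attrs)
		// 200 [http.response.status_code=200]
		// 404 [http.response.status_code=404 error.type=404]
	}
}
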
-func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue {
- t := reflect.TypeOf(err)
- var value string
- if t.PkgPath() == "" && t.Name() == "" {
- // Likely a builtin type.
- value = t.String()
- } else {
- value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
- }
-
- if value == "" {
- return semconvNew.ErrorTypeOther
- }
-
- return semconvNew.ErrorTypeKey.String(value)
-}
-
-func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
- if method == "" {
- return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
- }
- if attr, ok := methodLookup[method]; ok {
- return attr, attribute.KeyValue{}
- }
-
- orig := semconvNew.HTTPRequestMethodOriginal(method)
- if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
- return attr, orig
- }
- return semconvNew.HTTPRequestMethodGet, orig
-}
-
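
The method helper above normalizes in three tiers: a known method passes through, a known-but-miscased method is upper-cased with the raw spelling kept as http.request.method.original, and an unknown token falls back to GET plus the original. A standalone sketch of those rules (methodLookup here is a stand-in for the package's table, and only part of it):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

var methodLookup = map[string]string{
	http.MethodGet:  "GET",
	http.MethodPost: "POST",
	// Remaining canonical methods elided for brevity.
}

func normalize(method string) (canonical, original string) {
	if m, ok := methodLookup[method]; ok {
		return m, "" // Already canonical: no original recorded.
	}
	if m, ok := methodLookup[strings.ToUpper(method)]; ok {
		return m, method // Normalized: keep the raw spelling.
	}
	return "GET", method // Unknown: fall back to GET, keep the raw spelling.
}

func main() {
	fmt.Println(normalize("POST")) // POST
	fmt.Println(normalize("get"))  // GET get
	fmt.Println(normalize("brew")) // GET brew
}
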
-func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Histogram{}, noop.Float64Histogram{}
- }
-
- var err error
- requestBodySize, err := meter.Int64Histogram(
- semconvNew.HTTPClientRequestBodySizeName,
- metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription),
- )
- handleErr(err)
-
- requestDuration, err := meter.Float64Histogram(
- semconvNew.HTTPClientRequestDurationName,
- metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit),
- metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription),
- metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
- )
- handleErr(err)
-
- return requestBodySize, requestDuration
-}
-
-func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
-	num := len(additionalAttributes) + 3 // Method, server address, and scheme.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- var requestHost string
- var requestPort int
- for _, hostport := range []string{h, req.Header.Get("Host")} {
- requestHost, requestPort = SplitHostPort(hostport)
- if requestHost != "" || requestPort > 0 {
- break
- }
- }
-
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
- if port > 0 {
- num++
- }
-
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- num++
- }
- if protoVersion != "" {
- num++
- }
-
- if statusCode > 0 {
- num++
- }
-
- attributes := slices.Grow(additionalAttributes, num)
- attributes = append(attributes,
- semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)),
- semconvNew.ServerAddress(requestHost),
- n.scheme(req),
- )
-
- if port > 0 {
- attributes = append(attributes, semconvNew.ServerPort(port))
- }
- if protoName != "" {
- attributes = append(attributes, semconvNew.NetworkProtocolName(protoName))
- }
- if protoVersion != "" {
- attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode))
- }
- return attributes
-}
-
-// TraceAttributes returns attributes for httptrace.
-func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue {
- return []attribute.KeyValue{
- semconvNew.ServerAddress(host),
- }
-}
-
-func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue {
- if req.URL != nil && req.URL.Scheme != "" {
- return semconvNew.URLScheme(req.URL.Scheme)
- }
- if req.TLS != nil {
- return semconvNew.URLScheme("https")
- }
- return semconvNew.URLScheme("http")
-}
-
-func isErrorStatusCode(code int) bool {
- return code >= 400 || code < 100
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
deleted file mode 100644
index bc1f7751d..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconv/util.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-
-import (
- "net"
- "net/http"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
-)
-
-// SplitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
-func SplitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndexByte(hostport, ']')
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndexByte(hostport[addrEnd:], ':'); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndexByte(hostport, ':'); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
-	return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
-}
-
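
Working through the documented forms, the contract comes out to results like these (a non-exhaustive sketch):

SplitHostPort("example.com")      // "example.com", -1
SplitHostPort("example.com:8080") // "example.com", 8080
SplitHostPort("[::1]")            // "::1", -1
SplitHostPort("[::1]:443")        // "::1", 443
SplitHostPort(":8080")            // "", 8080
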
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
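
requiredHTTPPort keeps port attributes low-noise by reporting only non-default ports (a sketch of expected results):

requiredHTTPPort(true, 443)   // -1: default HTTPS port, omitted
requiredHTTPPort(true, 8443)  // 8443
requiredHTTPPort(false, 80)   // -1: default HTTP port, omitted
requiredHTTPPort(false, 8080) // 8080
requiredHTTPPort(false, -1)   // -1: port unknown, omitted
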
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.IndexByte(xForwardedFor, ','); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func httpRoute(pattern string) string {
- if idx := strings.IndexByte(pattern, '/'); idx >= 0 {
- return pattern[idx:]
- }
- return ""
-}
-
-func netProtocol(proto string) (name string, version string) {
- name, version, _ = strings.Cut(proto, "/")
- switch name {
- case "HTTP":
- name = "http"
- case "QUIC":
- name = "quic"
- case "SPDY":
- name = "spdy"
- default:
- name = strings.ToLower(name)
- }
- return name, version
-}
-
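
netProtocol splits an http.Request.Proto value on the first "/" and lower-cases the name (a sketch of expected results):

netProtocol("HTTP/1.1") // "http", "1.1"
netProtocol("HTTP/2.0") // "http", "2.0"
netProtocol("QUIC/1")   // "quic", "1"
netProtocol("custom")   // "custom", "" (no separator present)
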
-var methodLookup = map[string]attribute.KeyValue{
- http.MethodConnect: semconvNew.HTTPRequestMethodConnect,
- http.MethodDelete: semconvNew.HTTPRequestMethodDelete,
- http.MethodGet: semconvNew.HTTPRequestMethodGet,
- http.MethodHead: semconvNew.HTTPRequestMethodHead,
- http.MethodOptions: semconvNew.HTTPRequestMethodOptions,
- http.MethodPatch: semconvNew.HTTPRequestMethodPatch,
- http.MethodPost: semconvNew.HTTPRequestMethodPost,
- http.MethodPut: semconvNew.HTTPRequestMethodPut,
- http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
-}
-
-func handleErr(err error) {
- if err != nil {
- otel.Handle(err)
- }
-}
-
-func standardizeHTTPMethod(method string) string {
- method = strings.ToUpper(method)
- switch method {
- case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
- default:
- method = "_OTHER"
- }
- return method
-}
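
standardizeHTTPMethod bounds metric label cardinality: anything outside the nine registered methods collapses to "_OTHER" (a sketch of expected results):

standardizeHTTPMethod("get")   // "GET"
standardizeHTTPMethod("PATCH") // "PATCH"
standardizeHTTPMethod("BREW")  // "_OTHER"
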
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
deleted file mode 100644
index ba7fccf1e..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconv/v120.0.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
-
-import (
- "errors"
- "io"
- "net/http"
- "slices"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/noop"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type OldHTTPServer struct{}
-
-// RequestTraceAttrs returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs)
-}
-
-func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue {
- return semconvutil.NetTransport(network)
-}
-
-// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
-//
-// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
-func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue {
- if resp.ReadBytes > 0 {
- attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes)))
- }
- if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error()))
- }
- if resp.WriteBytes > 0 {
- attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes)))
- }
- if resp.StatusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode))
- }
- if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) {
- // This is not in the semantic conventions, but is historically provided
- attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error()))
- }
-
- return attributes
-}
-
-// Route returns the attribute for the route.
-func (o OldHTTPServer) Route(route string) attribute.KeyValue {
- return semconv.HTTPRoute(route)
-}
-
-// HTTPStatusCode returns the attribute for the HTTP status code.
-// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added.
-func HTTPStatusCode(status int) attribute.KeyValue {
- return semconv.HTTPStatusCode(status)
-}
-
-// Server HTTP metrics.
-const (
- serverRequestSize = "http.server.request.size" // Incoming request bytes total
-	serverResponseSize = "http.server.response.size" // Outgoing response bytes total
- serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
-)
-
-func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- var err error
- requestBytesCounter, err := meter.Int64Counter(
- serverRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- serverResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- serverLatencyMeasure, err := meter.Float64Histogram(
- serverDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of inbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
-}
-
-func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- n := len(additionalAttributes) + 3
- var host string
- var p int
- if server == "" {
- host, p = SplitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = SplitHostPort(server)
- if p < 0 {
- _, p = SplitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- o.scheme(req.TLS != nil),
- semconv.NetHostName(host))
-
- if hostPort > 0 {
- attributes = append(attributes, semconv.NetHostPort(hostPort))
- }
- if protoName != "" {
- attributes = append(attributes, semconv.NetProtocolName(protoName))
- }
- if protoVersion != "" {
- attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return semconv.HTTPSchemeHTTPS
- }
- return semconv.HTTPSchemeHTTP
-}
-
-type OldHTTPClient struct{}
-
-func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientRequest(req, attrs)
-}
-
-func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return semconvutil.HTTPClientResponse(resp, attrs)
-}
-
-func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.status_code int
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- var requestHost string
- var requestPort int
- for _, hostport := range []string{h, req.Header.Get("Host")} {
- requestHost, requestPort = SplitHostPort(hostport)
- if requestHost != "" || requestPort > 0 {
- break
- }
- }
-
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
- if port > 0 {
- n++
- }
-
- if statusCode > 0 {
- n++
- }
-
- attributes := slices.Grow(additionalAttributes, n)
- attributes = append(attributes,
- semconv.HTTPMethod(standardizeHTTPMethod(req.Method)),
- semconv.NetPeerName(requestHost),
- )
-
- if port > 0 {
- attributes = append(attributes, semconv.NetPeerPort(port))
- }
-
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- return attributes
-}
-
-// Client HTTP metrics.
-const (
-	clientRequestSize  = "http.client.request.size"  // Outgoing request bytes total
-	clientResponseSize = "http.client.response.size" // Incoming response bytes total
-	clientDuration     = "http.client.duration"      // Outgoing end to end duration, milliseconds
-)
-
-func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
- if meter == nil {
- return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
- }
- requestBytesCounter, err := meter.Int64Counter(
- clientRequestSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request messages."),
- )
- handleErr(err)
-
- responseBytesCounter, err := meter.Int64Counter(
- clientResponseSize,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response messages."),
- )
- handleErr(err)
-
- latencyMeasure, err := meter.Float64Histogram(
- clientDuration,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of outbound HTTP requests."),
- )
- handleErr(err)
-
- return requestBytesCounter, responseBytesCounter, latencyMeasure
-}
-
-// TraceAttributes returns attributes for httptrace.
-func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue {
- return append(attrs, semconv.NetHostName(host))
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
deleted file mode 100644
index 7aa5f99e8..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-// Generate semconvutil package:
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
deleted file mode 100644
index b99735479..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/httpconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconvutil provides OpenTelemetry semantic convention utilities.
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "fmt"
- "net/http"
- "slices"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-type HTTPServerRequestOptions struct {
- // If set, this is used as value for the "http.client_ip" attribute.
- HTTPClientIP string
-}
-
-// HTTPClientResponse returns trace attributes for an HTTP response received by a
-// client from a server. It will return the following attributes if the related
-// values are defined in resp: "http.status_code",
-// "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes HTTPClientRequest was used to create the span with a complete set
-// of attributes. A complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-//	HTTPClientResponse(resp, HTTPClientRequest(resp.Request, attrs))
-func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientResponse(resp, attrs)
-}
-
-// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length".
-func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ClientRequest(req, attrs)
-}
-
-// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the
-// related values are defined in req: "net.peer.port".
-func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- return hc.ClientRequestMetrics(req)
-}
-
-// HTTPClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func HTTPClientStatus(code int) (codes.Code, string) {
- return hc.ClientStatus(code)
-}
-
-// HTTPServerRequest returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if
-// the related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip".
-func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- return hc.ServerRequest(server, req, opts, attrs)
-}
-
-// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if the related
-// values are defined in req: "net.host.port".
-func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequestMetrics(server, req)
-}
-
-// HTTPServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func HTTPServerStatus(code int) (codes.Code, string) {
- return hc.ServerStatus(code)
-}
-
-// httpConv are the HTTP semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type httpConv struct {
- NetConv *netConv
-
- HTTPClientIPKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPResponseContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- UserAgentOriginalKey attribute.Key
-}
-
-var hc = &httpConv{
- NetConv: nc,
-
- HTTPClientIPKey: semconv.HTTPClientIPKey,
- HTTPMethodKey: semconv.HTTPMethodKey,
- HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
- HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
- HTTPRouteKey: semconv.HTTPRouteKey,
- HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
- HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
- HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
- HTTPTargetKey: semconv.HTTPTargetKey,
- HTTPURLKey: semconv.HTTPURLKey,
- UserAgentOriginalKey: semconv.UserAgentOriginalKey,
-}
-
-// ClientResponse returns attributes for an HTTP response received by a client
-// from a server. The following attributes are returned if the related values
-// are defined in resp: "http.status_code", "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. A complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-//	ClientResponse(resp, ClientRequest(resp.Request, attrs))
-func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.status_code int
- http.response_content_length int
- */
- var n int
- if resp.StatusCode > 0 {
- n++
- }
- if resp.ContentLength > 0 {
- n++
- }
- if n == 0 {
- return attrs
- }
-
- attrs = slices.Grow(attrs, n)
- if resp.StatusCode > 0 {
- attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
- }
- if resp.ContentLength > 0 {
- attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
- }
- return attrs
-}
-
-// ClientRequest returns attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.url", "http.method",
-// "net.peer.name". The following attributes are returned if the related values
-// are defined in req: "net.peer.port", "user_agent.original",
-// "http.request_content_length".
-func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- user_agent.original string
- http.url string
- net.peer.name string
- net.peer.port int
- http.request_content_length int
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response. See ClientResponse.
- http.response_content_length This requires the response. See ClientResponse.
- net.sock.family This requires the socket used.
- net.sock.peer.addr This requires the socket used.
- net.sock.peer.name This requires the socket used.
- net.sock.peer.port This requires the socket used.
- http.resend_count This is something outside of a single request.
-	   net.protocol.name The value in the Request is ignored, and the Go client will always use "http".
-	   net.protocol.version The value in the Request is ignored, and the Go client will always use 1.1 or 2.0.
- */
-	n := 3 // URL, peer name, and method.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- if req.ContentLength > 0 {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
- attrs = append(attrs, c.method(req.Method))
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, c.HTTPURLKey.String(u))
-
- attrs = append(attrs, c.NetConv.PeerName(peer))
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if l := req.ContentLength; l > 0 {
- attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
- }
-
- return attrs
-}
-
-// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.method", "net.peer.name".
-// The following attributes are returned if the related values
-// are defined in req: "net.peer.port".
-func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- net.peer.name string
- net.peer.port int
- */
-
- n := 2 // method, peer name.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
-
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- return attrs
-}
-
-// ServerRequest returns attributes for an HTTP request received by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.target", "net.host.name". The following attributes are returned if the
-// related values are defined in req: "net.host.port", "net.sock.peer.addr",
-// "net.sock.peer.port", "user_agent.original", "http.client_ip",
-// "net.protocol.name", "net.protocol.version".
-func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue {
- /* The following semantic conventions are returned if present:
- http.method string
- http.scheme string
- net.host.name string
- net.host.port int
- net.sock.peer.addr string
- net.sock.peer.port int
- user_agent.original string
- http.client_ip string
- net.protocol.name string Note: not set if the value is "http".
- net.protocol.version string
- http.target string Note: doesn't include the query parameter.
- */
-
- /* The following semantic conventions are not returned:
- http.status_code This requires the response.
- http.request_content_length This requires the len() of body, which can mutate it.
- http.response_content_length This requires the response.
- http.route This is not available.
- net.sock.peer.name This would require a DNS lookup.
- net.sock.host.addr The request doesn't have access to the underlying socket.
- net.sock.host.port The request doesn't have access to the underlying socket.
-
- */
-	n := 3 // Method, scheme, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- peer, peerPort := splitHostPort(req.RemoteAddr)
- if peer != "" {
- n++
- if peerPort > 0 {
- n++
- }
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
-
- // For client IP, use, in order:
- // 1. The value passed in the options
- // 2. The value in the X-Forwarded-For header
- // 3. The peer address
- clientIP := opts.HTTPClientIP
- if clientIP == "" {
- clientIP = serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP == "" {
- clientIP = peer
- }
- }
- if clientIP != "" {
- n++
- }
-
- var target string
- if req.URL != nil {
- target = req.URL.Path
- if target != "" {
- n++
- }
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" && protoName != "http" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs = slices.Grow(attrs, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
- if peerPort > 0 {
- attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
- }
-
- if clientIP != "" {
- attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
- }
-
- if target != "" {
- attrs = append(attrs, c.HTTPTargetKey.String(target))
- }
-
- if protoName != "" && protoName != "http" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-// ServerRequestMetrics returns metric attributes for an HTTP request received
-// by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "net.host.name". The following attributes are returned if the related
-// values are defined in req: "net.host.port".
-func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
-	/* The following semantic conventions are returned if present:
-	   http.scheme string
-	   http.method string
-	   net.host.name string
-	   net.host.port int
-	   net.protocol.name string
-	   net.protocol.version string
-	*/
-
- n := 3 // Method, scheme, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- protoName, protoVersion := netProtocol(req.Proto)
- if protoName != "" {
- n++
- }
- if protoVersion != "" {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.methodMetric(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
- if protoName != "" {
- attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
- }
- if protoVersion != "" {
- attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
- }
-
- return attrs
-}
-
-func (c *httpConv) method(method string) attribute.KeyValue {
- if method == "" {
- return c.HTTPMethodKey.String(http.MethodGet)
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) methodMetric(method string) attribute.KeyValue {
- method = strings.ToUpper(method)
- switch method {
- case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
- default:
- method = "_OTHER"
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return c.HTTPSchemeHTTPS
- }
- return c.HTTPSchemeHTTP
-}
-
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
-// Return the request host and port from the first non-empty source.
-func firstHostPort(source ...string) (host string, port int) {
- for _, hostport := range source {
- host, port = splitHostPort(hostport)
- if host != "" || port > 0 {
- break
- }
- }
- return
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 400 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 500 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
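
The asymmetry between ClientStatus and ServerStatus is deliberate: a 4xx marks the span as an error on the client side but not on the server side, since a 4xx usually means the server correctly handled a bad request. A self-contained sketch of the two predicates:

package main

import "fmt"

func clientIsError(code int) bool { return code < 100 || code >= 400 }
func serverIsError(code int) bool { return code < 100 || code >= 500 }

func main() {
	for _, code := range []int{200, 404, 500} {
		fmt.Printf("%d client-error=%t server-error=%t\n",
			code, clientIsError(code), serverIsError(code))
	}
	// 200 client-error=false server-error=false
	// 404 client-error=true server-error=false
	// 500 client-error=true server-error=true
}
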
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
deleted file mode 100644
index df97255e4..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Code generated by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/netconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "net"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
-)
-
-// NetTransport returns a trace attribute describing the transport protocol of the
-// passed network. See the net.Dial for information about acceptable network
-// values.
-func NetTransport(network string) attribute.KeyValue {
- return nc.Transport(network)
-}
-
-// netConv are the network semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type netConv struct {
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetProtocolName attribute.Key
- NetProtocolVersion attribute.Key
- NetSockFamilyKey attribute.Key
- NetSockPeerAddrKey attribute.Key
- NetSockPeerPortKey attribute.Key
- NetSockHostAddrKey attribute.Key
- NetSockHostPortKey attribute.Key
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportInProc attribute.KeyValue
-}
-
-var nc = &netConv{
- NetHostNameKey: semconv.NetHostNameKey,
- NetHostPortKey: semconv.NetHostPortKey,
- NetPeerNameKey: semconv.NetPeerNameKey,
- NetPeerPortKey: semconv.NetPeerPortKey,
- NetProtocolName: semconv.NetProtocolNameKey,
- NetProtocolVersion: semconv.NetProtocolVersionKey,
- NetSockFamilyKey: semconv.NetSockFamilyKey,
- NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
- NetSockPeerPortKey: semconv.NetSockPeerPortKey,
- NetSockHostAddrKey: semconv.NetSockHostAddrKey,
- NetSockHostPortKey: semconv.NetSockHostPortKey,
- NetTransportOther: semconv.NetTransportOther,
- NetTransportTCP: semconv.NetTransportTCP,
- NetTransportUDP: semconv.NetTransportUDP,
- NetTransportInProc: semconv.NetTransportInProc,
-}
-
-func (c *netConv) Transport(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.NetTransportTCP
- case "udp", "udp4", "udp6":
- return c.NetTransportUDP
- case "unix", "unixgram", "unixpacket":
- return c.NetTransportInProc
- default:
- // "ip:*", "ip4:*", and "ip6:*" all are considered other.
- return c.NetTransportOther
- }
-}
-
-// Host returns attributes for a network host address.
-func (c *netConv) Host(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.HostName(h))
- if p > 0 {
- attrs = append(attrs, c.HostPort(p))
- }
- return attrs
-}
-
-func (c *netConv) HostName(name string) attribute.KeyValue {
- return c.NetHostNameKey.String(name)
-}
-
-func (c *netConv) HostPort(port int) attribute.KeyValue {
- return c.NetHostPortKey.Int(port)
-}
-
-func family(network, address string) string {
- switch network {
- case "unix", "unixgram", "unixpacket":
- return "unix"
- default:
- if ip := net.ParseIP(address); ip != nil {
- if ip.To4() == nil {
- return "inet6"
- }
- return "inet"
- }
- }
- return ""
-}
-
-// Peer returns attributes for a network peer address.
-func (c *netConv) Peer(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.PeerName(h))
- if p > 0 {
- attrs = append(attrs, c.PeerPort(p))
- }
- return attrs
-}
-
-func (c *netConv) PeerName(name string) attribute.KeyValue {
- return c.NetPeerNameKey.String(name)
-}
-
-func (c *netConv) PeerPort(port int) attribute.KeyValue {
- return c.NetPeerPortKey.Int(port)
-}
-
-func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
- return c.NetSockPeerAddrKey.String(addr)
-}
-
-func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
- return c.NetSockPeerPortKey.Int(port)
-}
-
-// splitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
-func splitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndex(hostport, ":"); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
- return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
-}
-
-func netProtocol(proto string) (name string, version string) {
- name, version, _ = strings.Cut(proto, "/")
- switch name {
- case "HTTP":
- name = "http"
- case "QUIC":
- name = "quic"
- case "SPDY":
- name = "spdy"
- default:
- name = strings.ToLower(name)
- }
- return name, version
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
deleted file mode 100644
index d62ce44b0..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "sync"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
-// the metrics recorded by the net/http instrumentation.
-type Labeler struct {
- mu sync.Mutex
- attributes []attribute.KeyValue
-}
-
-// Add attributes to a Labeler.
-func (l *Labeler) Add(ls ...attribute.KeyValue) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.attributes = append(l.attributes, ls...)
-}
-
-// Get returns a copy of the attributes added to the Labeler.
-func (l *Labeler) Get() []attribute.KeyValue {
- l.mu.Lock()
- defer l.mu.Unlock()
- ret := make([]attribute.KeyValue, len(l.attributes))
- copy(ret, l.attributes)
- return ret
-}
-
-type labelerContextKeyType int
-
-const labelerContextKey labelerContextKeyType = 0
-
-// ContextWithLabeler returns a new context with the provided Labeler instance.
-// Attributes added to the specified labeler will be injected into metrics
-// emitted by the instrumentation. Only one Labeler can be injected into the
-// context. Injecting it multiple times will override the previous calls.
-func ContextWithLabeler(parent context.Context, l *Labeler) context.Context {
- return context.WithValue(parent, labelerContextKey, l)
-}
-
-// LabelerFromContext retrieves a Labeler instance from the provided context if
-// one is available. If no Labeler was found in the provided context a new, empty
-// Labeler is returned and the second return value is false. In this case it is
-// safe to use the Labeler but any attributes added to it will not be used.
-func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
- l, ok := ctx.Value(labelerContextKey).(*Labeler)
- if !ok {
- l = &Labeler{}
- }
- return l, ok
-}
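
Putting the Labeler together: the otelhttp handler injects a Labeler into each request's context, and application handlers can attach extra attributes that end up on the instrumentation's metrics. A hedged usage sketch (the header name and operation name are illustrative):

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The Labeler was placed in the context by otelhttp.NewHandler.
		labeler, _ := otelhttp.LabelerFromContext(r.Context())
		labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant")))
		w.WriteHeader(http.StatusOK)
	})

	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(inner, "hello"))
}
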
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
deleted file mode 100644
index 9476ef01b..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "time"
-)
-
-type startTimeContextKeyType int
-
-const startTimeContextKey startTimeContextKeyType = 0
-
-// ContextWithStartTime returns a new context with the provided start time. The
-// start time will be used for metrics and traces emitted by the
-// instrumentation. Only one start time can be injected into the context.
-// Injecting it multiple times will override the previous calls.
-func ContextWithStartTime(parent context.Context, start time.Time) context.Context {
- return context.WithValue(parent, startTimeContextKey, start)
-}
-
-// StartTimeFromContext retrieves a time.Time from the provided context if one
-// is available. If no start time was found in the provided context, the
-// zero time.Time is returned.
-func StartTimeFromContext(ctx context.Context) time.Time {
- t, _ := ctx.Value(startTimeContextKey).(time.Time)
- return t
-}
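
A sketch of backdating instrumentation with the start-time context, e.g. to account for time a request spent queued before processing began (the queue delay is illustrative):

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// Pretend the request waited in a queue before the handler ran.
	start := time.Now().Add(-250 * time.Millisecond)

	ctx := otelhttp.ContextWithStartTime(context.Background(), start)

	// Instrumentation reading this context uses the earlier start time.
	if t := otelhttp.StartTimeFromContext(ctx); !t.IsZero() {
		_ = t // Spans and metrics would be backdated to t.
	}
}
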
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
deleted file mode 100644
index 44b86ad86..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/http/httptrace"
- "sync/atomic"
- "time"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/propagation"
-	"go.opentelemetry.io/otel/trace"
-)
-
-// Transport implements the http.RoundTripper interface and wraps
-// outbound HTTP(S) requests with a span and enriches them with metrics.
-type Transport struct {
- rt http.RoundTripper
-
- tracer trace.Tracer
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- clientTrace func(context.Context) *httptrace.ClientTrace
- metricAttributesFn func(*http.Request) []attribute.KeyValue
-
- semconv semconv.HTTPClient
-}
-
-var _ http.RoundTripper = &Transport{}
-
-// NewTransport wraps the provided http.RoundTripper with one that
-// starts a span, injects the span context into the outbound request headers,
-// and enriches it with metrics.
-//
-// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
-// as the base http.RoundTripper.
-func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
- if base == nil {
- base = http.DefaultTransport
- }
-
- t := Transport{
- rt: base,
- }
-
- defaultOpts := []Option{
- WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
- WithSpanNameFormatter(defaultTransportFormatter),
- }
-
- c := newConfig(append(defaultOpts, opts...)...)
- t.applyConfig(c)
-
- return &t
-}
-
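
Typical client-side wiring for the Transport above: wrap the default transport once, and every request issued through that client gets a span and metrics. A minimal usage sketch:

package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	client := http.Client{
		// A nil base means http.DefaultTransport is wrapped.
		Transport: otelhttp.NewTransport(nil),
	}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	// Closing the body ends the client span (see RoundTrip's contract).
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
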
-func (t *Transport) applyConfig(c *config) {
- t.tracer = c.Tracer
- t.propagators = c.Propagators
- t.spanStartOptions = c.SpanStartOptions
- t.filters = c.Filters
- t.spanNameFormatter = c.SpanNameFormatter
- t.clientTrace = c.ClientTrace
- t.semconv = semconv.NewHTTPClient(c.Meter)
- t.metricAttributesFn = c.MetricAttributesFn
-}
-
-func defaultTransportFormatter(_ string, r *http.Request) string {
- return "HTTP " + r.Method
-}
-
-// RoundTrip creates a Span and propagates its context via the provided request's headers
-// before handing the request to the configured base RoundTripper. The created span will
-// end when the response body is closed or when a read from the body returns io.EOF.
-func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
- requestStartTime := time.Now()
- for _, f := range t.filters {
- if !f(r) {
- // Simply pass through to the base RoundTripper if a filter rejects the request
- return t.rt.RoundTrip(r)
- }
- }
-
- tracer := t.tracer
-
- if tracer == nil {
- if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
- tracer = newTracer(span.TracerProvider())
- } else {
- tracer = newTracer(otel.GetTracerProvider())
- }
- }
-
- opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
-
- ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
-
- if t.clientTrace != nil {
- ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
- }
-
- labeler, found := LabelerFromContext(ctx)
- if !found {
- ctx = ContextWithLabeler(ctx, labeler)
- }
-
-	r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the original request.
-
-	// If the request body is nil or http.NoBody, don't replace it: swapping
-	// in a wrapper would change the body's identity in observable ways,
-	// because callers and transports check for exactly nil or http.NoBody.
- bw := request.NewBodyWrapper(r.Body, func(int64) {})
- if r.Body != nil && r.Body != http.NoBody {
- r.Body = bw
- }
-
- span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
- t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
-
- res, err := t.rt.RoundTrip(r)
- if err != nil {
- // set error type attribute if the error is part of the predefined
- // error types.
- // otherwise, record it as an exception
- if errType := t.semconv.ErrorType(err); errType.Valid() {
- span.SetAttributes(errType)
- } else {
- span.RecordError(err)
- }
-
- span.SetStatus(codes.Error, err.Error())
- span.End()
- return res, err
- }
-
- // metrics
- metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{
- Req: r,
- StatusCode: res.StatusCode,
- AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...),
- })
-
- // For handling response bytes we leverage a callback when the client reads the http response
- readRecordFunc := func(n int64) {
- t.semconv.RecordResponseSize(ctx, n, metricOpts)
- }
-
- // traces
- span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
- span.SetStatus(t.semconv.Status(res.StatusCode))
-
- res.Body = newWrappedBody(span, readRecordFunc, res.Body)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
-
- t.semconv.RecordMetrics(ctx, semconv.MetricData{
- RequestSize: bw.BytesRead(),
- ElapsedTime: elapsedTime,
- }, metricOpts)
-
- return res, nil
-}
-
-func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
- var attributeForRequest []attribute.KeyValue
- if t.metricAttributesFn != nil {
- attributeForRequest = t.metricAttributesFn(r)
- }
- return attributeForRequest
-}
-
-// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
-// io.ReadCloser. If the passed body implements io.Writer, the returned value
-// will implement io.ReadWriteCloser.
-func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser {
- // Successful protocol switch responses will have a body that implements
- // io.ReadWriteCloser. Ensure this interface type continues to be satisfied
- // if that is the case.
- if _, ok := body.(io.ReadWriteCloser); ok {
- return &wrappedBody{span: span, record: record, body: body}
- }
-
- // Otherwise, hide the io.Writer implementation so that only io.ReadCloser
- // is exposed.
- return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}}
-}
-
-// wrappedBody is the response body type returned by the transport
-// instrumentation to complete a span. Errors encountered when using the
-// response body are recorded in the span tracking the response.
-//
-// The span tracking the response is ended when this body is closed.
-//
-// If the response body implements the io.Writer interface (i.e. for
-// successful protocol switches), the wrapped body also will.
-type wrappedBody struct {
- span trace.Span
- recorded atomic.Bool
- record func(n int64)
- body io.ReadCloser
- read atomic.Int64
-}
-
-var _ io.ReadWriteCloser = &wrappedBody{}
-
-func (wb *wrappedBody) Write(p []byte) (int, error) {
- // This will not panic given the guard in newWrappedBody.
- n, err := wb.body.(io.Writer).Write(p)
- if err != nil {
- wb.span.RecordError(err)
- wb.span.SetStatus(codes.Error, err.Error())
- }
- return n, err
-}
-
-func (wb *wrappedBody) Read(b []byte) (int, error) {
- n, err := wb.body.Read(b)
- // Record the number of bytes read
- wb.read.Add(int64(n))
-
- switch err {
- case nil:
- // nothing to do here but fall through to the return
- case io.EOF:
- wb.recordBytesRead()
- wb.span.End()
- default:
- wb.span.RecordError(err)
- wb.span.SetStatus(codes.Error, err.Error())
- }
- return n, err
-}
-
-// recordBytesRead ensures the number of bytes read is recorded exactly once.
-func (wb *wrappedBody) recordBytesRead() {
- // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that
- // two goroutines are racing to call this method, the number of bytes read will no longer increase. Using
- // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish
- // calling wb.record(wb.read.Load()).
- if wb.recorded.CompareAndSwap(false, true) {
- // Record the total number of bytes read
- wb.record(wb.read.Load())
- }
-}
-
-func (wb *wrappedBody) Close() error {
- wb.recordBytesRead()
- wb.span.End()
- if wb.body != nil {
- return wb.body.Close()
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
deleted file mode 100644
index 6be4c1fde..000000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-// Version is the current release version of the otelhttp instrumentation.
-func Version() string {
- return "0.61.0"
- // This string is updated by the pre_release.sh script during release
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE
similarity index 88%
rename from vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
rename to vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE
index d64569567..f1aee0f11 100644
--- a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE
@@ -1,4 +1,3 @@
-
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -200,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/README.md b/vendor/go.opentelemetry.io/otel/exporters/prometheus/README.md
new file mode 100644
index 000000000..f4dc09d38
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/README.md
@@ -0,0 +1,3 @@
+# Prometheus Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/prometheus)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/prometheus)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
new file mode 100644
index 000000000..dc3542637
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
@@ -0,0 +1,220 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/otlptranslator"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// config contains options for the exporter.
+type config struct {
+ registerer prometheus.Registerer
+ disableTargetInfo bool
+ translationStrategy otlptranslator.TranslationStrategyOption
+ withoutUnits bool
+ withoutCounterSuffixes bool
+ readerOpts []metric.ManualReaderOption
+ disableScopeInfo bool
+ namespace string
+ resourceAttributesFilter attribute.Filter
+}
+
+var logTemporaryDefault = sync.OnceFunc(func() {
+ global.Warn(
+ "The default Prometheus naming translation strategy is planned to be changed from otlptranslator.NoUTF8EscapingWithSuffixes to otlptranslator.UnderscoreEscapingWithSuffixes in a future release. Add prometheus.WithTranslationStrategy(otlptranslator.NoUTF8EscapingWithSuffixes) to preserve the existing behavior, or prometheus.WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes) to opt into the future default behavior.",
+ )
+})
+
+// newConfig creates a validated config configured with options.
+func newConfig(opts ...Option) config {
+ cfg := config{}
+ for _, opt := range opts {
+ cfg = opt.apply(cfg)
+ }
+
+ if cfg.translationStrategy == "" {
+ // If no translation strategy was specified, deduce one based on the global
+ // NameValidationScheme. NOTE: this logic will change in the future, always
+ // defaulting to UnderscoreEscapingWithSuffixes.
+
+ //nolint:staticcheck // NameValidationScheme is deprecated but we still need it for now.
+ if model.NameValidationScheme == model.UTF8Validation {
+ logTemporaryDefault()
+ cfg.translationStrategy = otlptranslator.NoUTF8EscapingWithSuffixes
+ } else {
+ cfg.translationStrategy = otlptranslator.UnderscoreEscapingWithSuffixes
+ }
+ } else {
+ // Note, if the translation strategy implies that suffixes should be added,
+ // the user can still use WithoutUnits and WithoutCounterSuffixes to
+ // explicitly disable specific suffixes. We do not override their preference
+ // in this case. However, if the chosen strategy disables suffixes, we
+ // forcibly disable all of them.
+ if !cfg.translationStrategy.ShouldAddSuffixes() {
+ cfg.withoutCounterSuffixes = true
+ cfg.withoutUnits = true
+ }
+ }
+
+ if cfg.registerer == nil {
+ cfg.registerer = prometheus.DefaultRegisterer
+ }
+
+ return cfg
+}
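+
+// Illustrative sketch (not part of the API; the namespace value is
+// hypothetical): options compose as plain functions over the config value,
+// so construction reads as:
+//
+//    cfg := newConfig(
+//        WithNamespace("myapp"),
+//        WithoutTargetInfo(),
+//    )
+//
+// Each Option receives the current config and returns the updated copy, so
+// later options win on conflicting fields.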
+
+// Option sets exporter option values.
+type Option interface {
+ apply(config) config
+}
+
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(cfg config) config {
+ return fn(cfg)
+}
+
+// WithRegisterer configures which prometheus Registerer the Exporter will
+// register with. If no registerer is provided, the prometheus
+// DefaultRegisterer is used.
+func WithRegisterer(reg prometheus.Registerer) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.registerer = reg
+ return cfg
+ })
+}
+
+// WithAggregationSelector configures the AggregationSelector the exporter
+// will use. If no AggregationSelector is provided, the
+// DefaultAggregationSelector is used.
+func WithAggregationSelector(agg metric.AggregationSelector) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.readerOpts = append(cfg.readerOpts, metric.WithAggregationSelector(agg))
+ return cfg
+ })
+}
+
+// WithProducer configures the metric Producer the exporter will use as a
+// source of external metric data.
+func WithProducer(producer metric.Producer) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.readerOpts = append(cfg.readerOpts, metric.WithProducer(producer))
+ return cfg
+ })
+}
+
+// WithoutTargetInfo configures the Exporter to not export the resource target_info metric.
+// If not specified, the Exporter will create a target_info metric containing
+// the metrics' resource.Resource attributes.
+func WithoutTargetInfo() Option {
+ return optionFunc(func(cfg config) config {
+ cfg.disableTargetInfo = true
+ return cfg
+ })
+}
+
+// WithTranslationStrategy provides a standardized way to define how metric and
+// label names should be handled during translation to Prometheus format. See:
+// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.48.0/specification/metrics/sdk_exporters/prometheus.md#configuration.
+// The recommended approach is to use either
+// [otlptranslator.UnderscoreEscapingWithSuffixes] for full Prometheus-style
+// compatibility or [otlptranslator.NoTranslation] for OpenTelemetry-style names.
+//
+// By default, if the NameValidationScheme variable in
+// [github.com/prometheus/common/model] is "legacy", the default strategy is
+// [otlptranslator.UnderscoreEscapingWithSuffixes]. If the validation scheme is
+// "utf8", then currently the default Strategy is
+// [otlptranslator.NoUTF8EscapingWithSuffixes].
+//
+// Notice: It is planned that a future release of this SDK will change the
+// default to always be [otlptranslator.UnderscoreEscapingWithSuffixes] in all
+// circumstances. Users wanting a different translation strategy should specify
+// it explicitly.
+func WithTranslationStrategy(strategy otlptranslator.TranslationStrategyOption) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.translationStrategy = strategy
+ return cfg
+ })
+}
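+
+// A usage sketch, explicitly opting into the planned future default:
+//
+//    exporter, err := prometheus.New(
+//        prometheus.WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes),
+//    )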
+
+// WithoutUnits disables the exporter's addition of unit suffixes to metric
+// names, and will also prevent unit comments from being added in OpenMetrics
+// once unit comments are supported.
+//
+// By default, metric names include a unit suffix to follow Prometheus naming
+// conventions. For example, the counter metric request.duration, with unit
+// milliseconds would become request_duration_milliseconds_total.
+// With this option set, the name would instead be request_duration_total.
+//
+// Can be used in conjunction with [WithTranslationStrategy] to disable unit
+// suffixes in strategies that would otherwise add suffixes, but this behavior
+// is not recommended and may be removed in a future release.
+//
+// Deprecated: Use [WithTranslationStrategy] instead.
+func WithoutUnits() Option {
+ return optionFunc(func(cfg config) config {
+ cfg.withoutUnits = true
+ return cfg
+ })
+}
+
+// WithoutCounterSuffixes disables the exporter's addition of _total suffixes
+// on counters.
+//
+// By default, metric names include a _total suffix to follow Prometheus naming
+// conventions. For example, the counter metric happy.people would become
+// happy_people_total. With this option set, the name would instead be
+// happy_people.
+//
+// Can be used in conjunction with [WithTranslationStrategy] to disable counter
+// suffixes in strategies that would otherwise add suffixes, but this behavior
+// is not recommended and may be removed in a future release.
+//
+// Deprecated: Use [WithTranslationStrategy] instead.
+func WithoutCounterSuffixes() Option {
+ return optionFunc(func(cfg config) config {
+ cfg.withoutCounterSuffixes = true
+ return cfg
+ })
+}
+
+// WithoutScopeInfo configures the Exporter to not export
+// labels about Instrumentation Scope to all metric points.
+func WithoutScopeInfo() Option {
+ return optionFunc(func(cfg config) config {
+ cfg.disableScopeInfo = true
+ return cfg
+ })
+}
+
+// WithNamespace configures the Exporter to prefix metrics with the given
+// namespace. Metadata metrics such as target_info are not prefixed since these
+// have special behavior based on their name. Namespaces will be prepended even
+// if [otlptranslator.NoTranslation] is set as a translation strategy. If the provided namespace
+// is empty, nothing will be prepended to metric names.
+func WithNamespace(ns string) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.namespace = ns
+ return cfg
+ })
+}
+
+// WithResourceAsConstantLabels configures the Exporter to add the resource attributes the
+// resourceFilter returns true for as attributes on all exported metrics.
+//
+// This does not affect the target info generated from resource attributes.
+func WithResourceAsConstantLabels(resourceFilter attribute.Filter) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.resourceAttributesFilter = resourceFilter
+ return cfg
+ })
+}
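+
+// A usage sketch, assuming attribute.NewAllowKeysFilter from the attribute
+// package is available to build the filter:
+//
+//    exporter, err := prometheus.New(
+//        prometheus.WithResourceAsConstantLabels(
+//            attribute.NewAllowKeysFilter("service.name"),
+//        ),
+//    )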
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go
new file mode 100644
index 000000000..e9b77869e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package prometheus provides a Prometheus Exporter that converts
+// OTLP metrics into the Prometheus exposition format and implements
+// prometheus.Collector to provide a handler for these metrics.
+package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
new file mode 100644
index 000000000..0f29c0abb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
@@ -0,0 +1,715 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "slices"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/otlptranslator"
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+const (
+ targetInfoDescription = "Target metadata"
+
+ scopeLabelPrefix = "otel_scope_"
+ scopeNameLabel = scopeLabelPrefix + "name"
+ scopeVersionLabel = scopeLabelPrefix + "version"
+ scopeSchemaLabel = scopeLabelPrefix + "schema_url"
+)
+
+var metricsPool = sync.Pool{
+ New: func() any {
+ return &metricdata.ResourceMetrics{}
+ },
+}
+
+// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
+// interface for easy instantiation with a MeterProvider.
+type Exporter struct {
+ metric.Reader
+}
+
+// MarshalLog returns logging data about the Exporter.
+func (e *Exporter) MarshalLog() any {
+ const t = "Prometheus exporter"
+
+ if r, ok := e.Reader.(*metric.ManualReader); ok {
+ under := r.MarshalLog()
+ if data, ok := under.(struct {
+ Type string
+ Registered bool
+ Shutdown bool
+ }); ok {
+ data.Type = t
+ return data
+ }
+ }
+
+ return struct{ Type string }{Type: t}
+}
+
+var _ metric.Reader = &Exporter{}
+
+// keyVals is used to store resource attribute key value pairs.
+type keyVals struct {
+ keys []string
+ vals []string
+}
+
+// collector is used to implement prometheus.Collector.
+type collector struct {
+ reader metric.Reader
+
+ withoutUnits bool
+ withoutCounterSuffixes bool
+ disableScopeInfo bool
+ namespace string
+ resourceAttributesFilter attribute.Filter
+
+ mu sync.Mutex // mu protects all members below from the concurrent access.
+ disableTargetInfo bool
+ targetInfo prometheus.Metric
+ metricFamilies map[string]*dto.MetricFamily
+ resourceKeyVals keyVals
+ metricNamer otlptranslator.MetricNamer
+ labelNamer otlptranslator.LabelNamer
+ unitNamer otlptranslator.UnitNamer
+}
+
+// New returns a Prometheus Exporter.
+func New(opts ...Option) (*Exporter, error) {
+ cfg := newConfig(opts...)
+
+ // This assumes that the default temporality selector will always return cumulative.
+ // We only support cumulative temporality, so building our own reader enforces this.
+ // TODO (#3244): Enable some way to configure the reader, but not change temporality.
+ reader := metric.NewManualReader(cfg.readerOpts...)
+
+ labelNamer := otlptranslator.LabelNamer{UTF8Allowed: !cfg.translationStrategy.ShouldEscape()}
+ escapedNamespace := cfg.namespace
+ if escapedNamespace != "" {
+ var err error
+ // If the namespace needs to be escaped, do that now when creating the new
+ // Collector object. The escaping is not persisted in the Config itself.
+ escapedNamespace, err = labelNamer.Build(escapedNamespace)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ collector := &collector{
+ reader: reader,
+ disableTargetInfo: cfg.disableTargetInfo,
+ withoutUnits: cfg.withoutUnits,
+ withoutCounterSuffixes: cfg.withoutCounterSuffixes,
+ disableScopeInfo: cfg.disableScopeInfo,
+ metricFamilies: make(map[string]*dto.MetricFamily),
+ namespace: escapedNamespace,
+ resourceAttributesFilter: cfg.resourceAttributesFilter,
+ metricNamer: otlptranslator.NewMetricNamer(escapedNamespace, cfg.translationStrategy),
+ unitNamer: otlptranslator.UnitNamer{UTF8Allowed: !cfg.translationStrategy.ShouldEscape()},
+ labelNamer: labelNamer,
+ }
+
+ if err := cfg.registerer.Register(collector); err != nil {
+ return nil, fmt.Errorf("cannot register the collector: %w", err)
+ }
+
+ e := &Exporter{
+ Reader: reader,
+ }
+
+ return e, nil
+}
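+
+// A minimal wiring sketch: the returned Exporter embeds metric.Reader, so it
+// plugs directly into a MeterProvider, while the registered collector serves
+// metrics through the standard promhttp handler (the promhttp import and the
+// "/metrics" path are assumptions of this example):
+//
+//    exporter, err := prometheus.New()
+//    if err != nil {
+//        panic(err)
+//    }
+//    provider := metric.NewMeterProvider(metric.WithReader(exporter))
+//    otel.SetMeterProvider(provider)
+//    http.Handle("/metrics", promhttp.Handler())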
+
+// Describe implements prometheus.Collector.
+func (*collector) Describe(chan<- *prometheus.Desc) {
+ // The OpenTelemetry SDK doesn't know which metrics will exist when the
+ // collector is registered. By returning nothing we are an "unchecked"
+ // collector in Prometheus, and assume responsibility for the consistency of
+ // the metrics produced.
+ //
+ // See https://pkg.go.dev/github.com/prometheus/client_golang@v1.13.0/prometheus#hdr-Custom_Collectors_and_constant_Metrics
+}
+
+// Collect implements prometheus.Collector.
+//
+// This method is safe to call concurrently.
+func (c *collector) Collect(ch chan<- prometheus.Metric) {
+ metrics := metricsPool.Get().(*metricdata.ResourceMetrics)
+ defer metricsPool.Put(metrics)
+ err := c.reader.Collect(context.TODO(), metrics)
+ if err != nil {
+ if errors.Is(err, metric.ErrReaderShutdown) {
+ return
+ }
+ otel.Handle(err)
+ if errors.Is(err, metric.ErrReaderNotRegistered) {
+ return
+ }
+ }
+
+ global.Debug("Prometheus exporter export", "Data", metrics)
+
+ // Initialize (once) targetInfo and disableTargetInfo.
+ func() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.targetInfo == nil && !c.disableTargetInfo {
+ targetInfo, err := c.createInfoMetric(
+ otlptranslator.TargetInfoMetricName,
+ targetInfoDescription,
+ metrics.Resource,
+ )
+ if err != nil {
+ // If the target info metric is invalid, disable sending it.
+ c.disableTargetInfo = true
+ otel.Handle(err)
+ return
+ }
+
+ c.targetInfo = targetInfo
+ }
+ }()
+
+ if !c.disableTargetInfo {
+ ch <- c.targetInfo
+ }
+
+ if c.resourceAttributesFilter != nil && len(c.resourceKeyVals.keys) == 0 {
+ err := c.createResourceAttributes(metrics.Resource)
+ if err != nil {
+ otel.Handle(err)
+ return
+ }
+ }
+
+ for _, scopeMetrics := range metrics.ScopeMetrics {
+ n := len(c.resourceKeyVals.keys) + 2 // capacity hint: resource attrs + scope name and version
+ kv := keyVals{
+ keys: make([]string, 0, n),
+ vals: make([]string, 0, n),
+ }
+
+ if !c.disableScopeInfo {
+ kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel)
+ kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL)
+
+ attrKeys, attrVals, err := getAttrs(scopeMetrics.Scope.Attributes, c.labelNamer)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ for i := range attrKeys {
+ attrKeys[i] = scopeLabelPrefix + attrKeys[i]
+ }
+ kv.keys = append(kv.keys, attrKeys...)
+ kv.vals = append(kv.vals, attrVals...)
+ }
+
+ kv.keys = append(kv.keys, c.resourceKeyVals.keys...)
+ kv.vals = append(kv.vals, c.resourceKeyVals.vals...)
+
+ for _, m := range scopeMetrics.Metrics {
+ typ := c.metricType(m)
+ if typ == nil {
+ continue
+ }
+ name, err := c.getName(m)
+ if err != nil {
+ // TODO(#7066): Handle this error better. It's not clear this can be
+ // reached, bad metric names should / will be caught at creation time.
+ otel.Handle(err)
+ continue
+ }
+
+ drop, help := c.validateMetrics(name, m.Description, typ)
+ if drop {
+ continue
+ }
+
+ if help != "" {
+ m.Description = help
+ }
+
+ switch v := m.Data.(type) {
+ case metricdata.Histogram[int64]:
+ addHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+ case metricdata.Histogram[float64]:
+ addHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+ case metricdata.ExponentialHistogram[int64]:
+ addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+ case metricdata.ExponentialHistogram[float64]:
+ addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer)
+ case metricdata.Sum[int64]:
+ addSumMetric(ch, v, m, name, kv, c.labelNamer)
+ case metricdata.Sum[float64]:
+ addSumMetric(ch, v, m, name, kv, c.labelNamer)
+ case metricdata.Gauge[int64]:
+ addGaugeMetric(ch, v, m, name, kv, c.labelNamer)
+ case metricdata.Gauge[float64]:
+ addGaugeMetric(ch, v, m, name, kv, c.labelNamer)
+ }
+ }
+ }
+}
+
+// downscaleExponentialBucket re-aggregates bucket counts when downscaling to a coarser resolution.
+func downscaleExponentialBucket(bucket metricdata.ExponentialBucket, scaleDelta int32) metricdata.ExponentialBucket {
+ if len(bucket.Counts) == 0 || scaleDelta < 1 {
+ return metricdata.ExponentialBucket{
+ Offset: bucket.Offset >> scaleDelta,
+ Counts: append([]uint64(nil), bucket.Counts...), // copy slice
+ }
+ }
+
+ // The new offset is scaled down
+ newOffset := bucket.Offset >> scaleDelta
+
+ // Pre-calculate the new bucket count to avoid growing the slice;
+ // each group of 2^scaleDelta buckets merges into one bucket.
+ //nolint:gosec // Length is bounded by slice allocation
+ lastBucketIdx := bucket.Offset + int32(len(bucket.Counts)) - 1
+ lastNewIdx := lastBucketIdx >> scaleDelta
+ newBucketCount := int(lastNewIdx - newOffset + 1)
+
+ if newBucketCount <= 0 {
+ return metricdata.ExponentialBucket{
+ Offset: newOffset,
+ Counts: []uint64{},
+ }
+ }
+
+ newCounts := make([]uint64, newBucketCount)
+
+ // Merge buckets according to the scale difference
+ for i, count := range bucket.Counts {
+ if count == 0 {
+ continue
+ }
+
+ // Calculate which new bucket this count belongs to
+ //nolint:gosec // Index is bounded by loop iteration
+ originalIdx := bucket.Offset + int32(i)
+ newIdx := originalIdx >> scaleDelta
+
+ // Calculate the position in the new counts array
+ position := newIdx - newOffset
+ //nolint:gosec // Length is bounded by allocation
+ if position >= 0 && position < int32(len(newCounts)) {
+ newCounts[position] += count
+ }
+ }
+
+ return metricdata.ExponentialBucket{
+ Offset: newOffset,
+ Counts: newCounts,
+ }
+}
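+
+// Worked example: with Offset=3 and Counts=[1, 2, 3] (buckets 3, 4 and 5), a
+// scaleDelta of 1 merges each pair of source buckets: newOffset = 3>>1 = 1,
+// bucket 3 maps to index 1, and buckets 4 and 5 both map to index 2, giving
+// Offset=1 and Counts=[1, 5].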
+
+func addExponentialHistogramMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ histogram metricdata.ExponentialHistogram[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+ labelNamer otlptranslator.LabelNamer,
+) {
+ for _, dp := range histogram.DataPoints {
+ keys, values, err := getAttrs(dp.Attributes, labelNamer)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ keys = append(keys, kv.keys...)
+ values = append(values, kv.vals...)
+
+ desc := prometheus.NewDesc(name, m.Description, keys, nil)
+
+ // Prometheus native histograms support scales in the range [-4, 8]
+ scale := dp.Scale
+ if scale < -4 {
+ // Reject scales below -4 as they cannot be represented in Prometheus
+ otel.Handle(fmt.Errorf(
+ "exponential histogram scale %d is below minimum supported scale -4, skipping data point",
+ scale))
+ continue
+ }
+
+ // If scale > 8, we need to downscale the buckets to match the clamped scale
+ positiveBucket := dp.PositiveBucket
+ negativeBucket := dp.NegativeBucket
+ if scale > 8 {
+ scaleDelta := scale - 8
+ positiveBucket = downscaleExponentialBucket(dp.PositiveBucket, scaleDelta)
+ negativeBucket = downscaleExponentialBucket(dp.NegativeBucket, scaleDelta)
+ scale = 8
+ }
+
+ // From the spec: Prometheus native histogram buckets are indexed by upper
+ // boundary, while exponential histogram buckets are indexed by lower
+ // boundary; as a result the Offset fields differ by one.
+ positiveBuckets := make(map[int]int64)
+ for i, c := range positiveBucket.Counts {
+ if c > math.MaxInt64 {
+ otel.Handle(fmt.Errorf("positive count %d is too large to be represented as int64", c))
+ continue
+ }
+ positiveBuckets[int(positiveBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
+ }
+
+ negativeBuckets := make(map[int]int64)
+ for i, c := range negativeBucket.Counts {
+ if c > math.MaxInt64 {
+ otel.Handle(fmt.Errorf("negative count %d is too large to be represented as int64", c))
+ continue
+ }
+ negativeBuckets[int(negativeBucket.Offset)+i+1] = int64(c) // nolint: gosec // Size check above.
+ }
+
+ m, err := prometheus.NewConstNativeHistogram(
+ desc,
+ dp.Count,
+ float64(dp.Sum),
+ positiveBuckets,
+ negativeBuckets,
+ dp.ZeroCount,
+ scale,
+ dp.ZeroThreshold,
+ dp.StartTime,
+ values...)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ m = addExemplars(m, dp.Exemplars, labelNamer)
+ ch <- m
+ }
+}
+
+func addHistogramMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ histogram metricdata.Histogram[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+ labelNamer otlptranslator.LabelNamer,
+) {
+ for _, dp := range histogram.DataPoints {
+ keys, values, err := getAttrs(dp.Attributes, labelNamer)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ keys = append(keys, kv.keys...)
+ values = append(values, kv.vals...)
+
+ desc := prometheus.NewDesc(name, m.Description, keys, nil)
+ buckets := make(map[float64]uint64, len(dp.Bounds))
+
+ cumulativeCount := uint64(0)
+ for i, bound := range dp.Bounds {
+ cumulativeCount += dp.BucketCounts[i]
+ buckets[bound] = cumulativeCount
+ }
+ m, err := prometheus.NewConstHistogram(desc, dp.Count, float64(dp.Sum), buckets, values...)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ m = addExemplars(m, dp.Exemplars, labelNamer)
+ ch <- m
+ }
+}
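+
+// Worked example: Bounds=[0.5, 1] with BucketCounts=[1, 2, 3] and Count=6
+// yield cumulative buckets {0.5: 1, 1: 3}; the trailing overflow count (3)
+// has no explicit bucket because Prometheus derives the +Inf bucket from
+// Count.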
+
+func addSumMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ sum metricdata.Sum[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+ labelNamer otlptranslator.LabelNamer,
+) {
+ valueType := prometheus.CounterValue
+ if !sum.IsMonotonic {
+ valueType = prometheus.GaugeValue
+ }
+
+ for _, dp := range sum.DataPoints {
+ keys, values, err := getAttrs(dp.Attributes, labelNamer)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ keys = append(keys, kv.keys...)
+ values = append(values, kv.vals...)
+
+ desc := prometheus.NewDesc(name, m.Description, keys, nil)
+ m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ // GaugeValues don't support Exemplars at this time
+ // https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199
+ if valueType != prometheus.GaugeValue {
+ m = addExemplars(m, dp.Exemplars, labelNamer)
+ }
+ ch <- m
+ }
+}
+
+func addGaugeMetric[N int64 | float64](
+ ch chan<- prometheus.Metric,
+ gauge metricdata.Gauge[N],
+ m metricdata.Metrics,
+ name string,
+ kv keyVals,
+ labelNamer otlptranslator.LabelNamer,
+) {
+ for _, dp := range gauge.DataPoints {
+ keys, values, err := getAttrs(dp.Attributes, labelNamer)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ keys = append(keys, kv.keys...)
+ values = append(values, kv.vals...)
+
+ desc := prometheus.NewDesc(name, m.Description, keys, nil)
+ m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...)
+ if err != nil {
+ otel.Handle(err)
+ continue
+ }
+ ch <- m
+ }
+}
+
+// getAttrs converts the attribute.Set to two lists of matching Prometheus-style
+// keys and values.
+func getAttrs(attrs attribute.Set, labelNamer otlptranslator.LabelNamer) ([]string, []string, error) {
+ keys := make([]string, 0, attrs.Len())
+ values := make([]string, 0, attrs.Len())
+ itr := attrs.Iter()
+
+ if labelNamer.UTF8Allowed {
+ // Do not perform sanitization if prometheus supports UTF-8.
+ for itr.Next() {
+ kv := itr.Attribute()
+ keys = append(keys, string(kv.Key))
+ values = append(values, kv.Value.Emit())
+ }
+ } else {
+ // It sanitizes invalid characters and handles duplicate keys
+ // (due to sanitization) by sorting and concatenating the values following the spec.
+ keysMap := make(map[string][]string)
+ for itr.Next() {
+ kv := itr.Attribute()
+ key, err := labelNamer.Build(string(kv.Key))
+ if err != nil {
+ // TODO(#7066) Handle this error better.
+ return nil, nil, err
+ }
+ if _, ok := keysMap[key]; !ok {
+ keysMap[key] = []string{kv.Value.Emit()}
+ } else {
+ // If the sanitized key is a duplicate, append to that key's list of values.
+ keysMap[key] = append(keysMap[key], kv.Value.Emit())
+ }
+ }
+ for key, vals := range keysMap {
+ keys = append(keys, key)
+ slices.Sort(vals)
+ values = append(values, strings.Join(vals, ";"))
+ }
+ }
+ return keys, values, nil
+}
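+
+// For example, with an escaping label namer the attributes foo.a="x" and
+// foo_a="y" both sanitize to the key "foo_a", so their values are sorted and
+// joined into the single label foo_a="x;y".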
+
+func (c *collector) createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
+ keys, values, err := getAttrs(*res.Set(), c.labelNamer)
+ if err != nil {
+ return nil, err
+ }
+ desc := prometheus.NewDesc(name, description, keys, nil)
+ return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
+}
+
+// getName returns the sanitized name, translated according to the selected
+// TranslationStrategy and namespace option.
+func (c *collector) getName(m metricdata.Metrics) (string, error) {
+ translatorMetric := otlptranslator.Metric{
+ Name: m.Name,
+ Type: c.namingMetricType(m),
+ }
+ if !c.withoutUnits {
+ translatorMetric.Unit = m.Unit
+ }
+ return c.metricNamer.Build(translatorMetric)
+}
+
+func (*collector) metricType(m metricdata.Metrics) *dto.MetricType {
+ switch v := m.Data.(type) {
+ case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]:
+ return dto.MetricType_HISTOGRAM.Enum()
+ case metricdata.Histogram[int64], metricdata.Histogram[float64]:
+ return dto.MetricType_HISTOGRAM.Enum()
+ case metricdata.Sum[float64]:
+ if v.IsMonotonic {
+ return dto.MetricType_COUNTER.Enum()
+ }
+ return dto.MetricType_GAUGE.Enum()
+ case metricdata.Sum[int64]:
+ if v.IsMonotonic {
+ return dto.MetricType_COUNTER.Enum()
+ }
+ return dto.MetricType_GAUGE.Enum()
+ case metricdata.Gauge[int64], metricdata.Gauge[float64]:
+ return dto.MetricType_GAUGE.Enum()
+ }
+ return nil
+}
+
+// namingMetricType provides the metric type for naming purposes.
+func (c *collector) namingMetricType(m metricdata.Metrics) otlptranslator.MetricType {
+ switch v := m.Data.(type) {
+ case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]:
+ return otlptranslator.MetricTypeHistogram
+ case metricdata.Histogram[int64], metricdata.Histogram[float64]:
+ return otlptranslator.MetricTypeHistogram
+ case metricdata.Sum[float64]:
+ // If counter suffixes are disabled, treat them like non-monotonic
+ // counters for the purposes of naming.
+ if v.IsMonotonic && !c.withoutCounterSuffixes {
+ return otlptranslator.MetricTypeMonotonicCounter
+ }
+ return otlptranslator.MetricTypeNonMonotonicCounter
+ case metricdata.Sum[int64]:
+ // If counter suffixes are disabled, treat them like non-monotonic
+ // counters for the purposes of naming.
+ if v.IsMonotonic && !c.withoutCounterSuffixes {
+ return otlptranslator.MetricTypeMonotonicCounter
+ }
+ return otlptranslator.MetricTypeNonMonotonicCounter
+ case metricdata.Gauge[int64], metricdata.Gauge[float64]:
+ return otlptranslator.MetricTypeGauge
+ case metricdata.Summary:
+ return otlptranslator.MetricTypeSummary
+ }
+ return otlptranslator.MetricTypeUnknown
+}
+
+func (c *collector) createResourceAttributes(res *resource.Resource) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter)
+ resourceKeys, resourceValues, err := getAttrs(resourceAttrs, c.labelNamer)
+ if err != nil {
+ return err
+ }
+
+ c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
+ return nil
+}
+
+func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ emf, exist := c.metricFamilies[name]
+
+ if !exist {
+ c.metricFamilies[name] = &dto.MetricFamily{
+ Name: proto.String(name),
+ Help: proto.String(description),
+ Type: metricType,
+ }
+ return false, ""
+ }
+
+ if emf.GetType() != *metricType {
+ global.Error(
+ errors.New("instrument type conflict"),
+ "Using existing type definition.",
+ "instrument", name,
+ "existing", emf.GetType(),
+ "dropped", *metricType,
+ )
+ return true, ""
+ }
+ if emf.GetHelp() != description {
+ global.Info(
+ "Instrument description conflict, using existing",
+ "instrument", name,
+ "existing", emf.GetHelp(),
+ "dropped", description,
+ )
+ return false, emf.GetHelp()
+ }
+
+ return false, ""
+}
+
+func addExemplars[N int64 | float64](
+ m prometheus.Metric,
+ exemplars []metricdata.Exemplar[N],
+ labelNamer otlptranslator.LabelNamer,
+) prometheus.Metric {
+ if len(exemplars) == 0 {
+ return m
+ }
+ promExemplars := make([]prometheus.Exemplar, len(exemplars))
+ for i, exemplar := range exemplars {
+ labels, err := attributesToLabels(exemplar.FilteredAttributes, labelNamer)
+ if err != nil {
+ otel.Handle(err)
+ return m
+ }
+ // Overwrite any existing trace ID or span ID attributes
+ labels[otlptranslator.ExemplarTraceIDKey] = hex.EncodeToString(exemplar.TraceID)
+ labels[otlptranslator.ExemplarSpanIDKey] = hex.EncodeToString(exemplar.SpanID)
+ promExemplars[i] = prometheus.Exemplar{
+ Value: float64(exemplar.Value),
+ Timestamp: exemplar.Time,
+ Labels: labels,
+ }
+ }
+ metricWithExemplar, err := prometheus.NewMetricWithExemplars(m, promExemplars...)
+ if err != nil {
+ // If there are errors creating the metric with exemplars, report them
+ // via otel.Handle and return the metric without exemplars.
+ otel.Handle(err)
+ return m
+ }
+ return metricWithExemplar
+}
+
+func attributesToLabels(attrs []attribute.KeyValue, labelNamer otlptranslator.LabelNamer) (prometheus.Labels, error) {
+ labels := make(map[string]string)
+ for _, attr := range attrs {
+ name, err := labelNamer.Build(string(attr.Key))
+ if err != nil {
+ return nil, err
+ }
+ labels[name] = attr.Value.Emit()
+ }
+ return labels, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
similarity index 88%
rename from vendor/github.com/aws/aws-sdk-go/LICENSE.txt
rename to vendor/go.opentelemetry.io/otel/sdk/LICENSE
index d64569567..f1aee0f11 100644
--- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -1,4 +1,3 @@
-
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@@ -200,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md
new file mode 100644
index 000000000..f81b1576a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/README.md
@@ -0,0 +1,3 @@
+# SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
new file mode 100644
index 000000000..06e6d8685
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
@@ -0,0 +1,3 @@
+# SDK Instrumentation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
new file mode 100644
index 000000000..a4faa6a03
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package instrumentation provides types to represent the code libraries that
+// provide OpenTelemetry instrumentation. These types are used in the
+// OpenTelemetry signal pipelines to identify the source of telemetry.
+//
+// See
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
+// and
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
+// for more information.
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
new file mode 100644
index 000000000..f2cdf3c65
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Library represents the instrumentation library.
+//
+// Deprecated: use [Scope] instead.
+type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
new file mode 100644
index 000000000..34852a47b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Scope represents the instrumentation scope.
+type Scope struct {
+ // Name is the name of the instrumentation scope. This should be the
+ // Go package name of that scope.
+ Name string
+ // Version is the version of the instrumentation scope.
+ Version string
+ // SchemaURL of the telemetry emitted by the scope.
+ SchemaURL string
+ // Attributes of the telemetry emitted by the scope.
+ Attributes attribute.Set
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 000000000..fab61647c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have experimental semantic conventions added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 000000000..1be472e91
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+ "os"
+ "strings"
+)
+
+// Resource is an experimental feature flag that defines whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
+ if strings.EqualFold(v, "true") {
+ return v, true
+ }
+ return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user-configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled reports whether the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
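+
+// A usage sketch, gating experimental behavior on the flag:
+//
+//    if x.Resource.Enabled() {
+//        // Include experimental resource semantic conventions.
+//    }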
diff --git a/vendor/github.com/prometheus/prometheus/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
similarity index 88%
rename from vendor/github.com/prometheus/prometheus/LICENSE
rename to vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/github.com/prometheus/prometheus/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md
new file mode 100644
index 000000000..017f072a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/README.md
@@ -0,0 +1,3 @@
+# Metric SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
new file mode 100644
index 000000000..e6f5cfb2a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "errors"
+ "fmt"
+ "slices"
+)
+
+// errAgg is the base error wrapped by errors from misconfigured aggregations.
+var errAgg = errors.New("aggregation")
+
+// Aggregation is the aggregation used to summarize recorded measurements.
+type Aggregation interface {
+ // copy returns a deep copy of the Aggregation.
+ copy() Aggregation
+
+ // err returns an error for any misconfigured Aggregation.
+ err() error
+}
+
+// AggregationDrop is an Aggregation that drops all recorded data.
+type AggregationDrop struct{} // AggregationDrop has no parameters.
+
+var _ Aggregation = AggregationDrop{}
+
+// copy returns a deep copy of d.
+func (d AggregationDrop) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A drop aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDrop) err() error { return nil }
+
+// AggregationDefault is an Aggregation that uses the default instrument kind selection
+// mapping to select another Aggregation. A metric reader can be configured to
+// make an aggregation selection based on instrument kind that differs from
+// the default. This Aggregation ensures the default is used.
+//
+// See the [DefaultAggregationSelector] for information about the default
+// instrument kind selection mapping.
+type AggregationDefault struct{} // AggregationDefault has no parameters.
+
+var _ Aggregation = AggregationDefault{}
+
+// copy returns a deep copy of d.
+func (d AggregationDefault) copy() Aggregation { return d }
+
+// err returns an error for any misconfiguration. A default aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationDefault) err() error { return nil }
+
+// AggregationSum is an Aggregation that summarizes a set of measurements as their
+// arithmetic sum.
+type AggregationSum struct{} // AggregationSum has no parameters.
+
+var _ Aggregation = AggregationSum{}
+
+// copy returns a deep copy of s.
+func (s AggregationSum) copy() Aggregation { return s }
+
+// err returns an error for any misconfiguration. A sum aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (AggregationSum) err() error { return nil }
+
+// AggregationLastValue is an Aggregation that summarizes a set of measurements as the
+// last one made.
+type AggregationLastValue struct{} // AggregationLastValue has no parameters.
+
+var _ Aggregation = AggregationLastValue{}
+
+// copy returns a deep copy of l.
+func (l AggregationLastValue) copy() Aggregation { return l }
+
+// err returns an error for any misconfiguration. A last-value aggregation has
+// no parameters and cannot be misconfigured, therefore this always returns
+// nil.
+func (AggregationLastValue) err() error { return nil }
+
+// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set
+// of measurements as a histogram with explicitly defined buckets.
+type AggregationExplicitBucketHistogram struct {
+ // Boundaries are the increasing bucket boundary values. Boundary values
+ // define bucket upper bounds. Buckets are exclusive of their lower
+ // boundary and inclusive of their upper bound (except at positive
+ // infinity). A measurement is defined to fall into the smallest-numbered
+ // bucket with a boundary that is greater than or equal to the
+ // measurement. As an example, boundaries defined as:
+ //
+ // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
+ //
+ // Will define these buckets:
+ //
+ // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
+ // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
+ // (500.0, 1000.0], (1000.0, +∞)
+ Boundaries []float64
+ // NoMinMax disables recording the min and max of the distribution. By
+ // default, these extrema are recorded.
+ //
+ // Recording these extrema for cumulative data is expected to have little
+ // value; they will represent the entire life of the instrument instead of
+ // just the current collection cycle. It is recommended to set this to true
+ // for that type of data to avoid computing the low-value extrema.
+ NoMinMax bool
+}
+
+var _ Aggregation = AggregationExplicitBucketHistogram{}
+
+// errHist is returned by misconfigured ExplicitBucketHistograms.
+var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
+
+// err returns an error for any misconfiguration.
+func (h AggregationExplicitBucketHistogram) err() error {
+ if len(h.Boundaries) <= 1 {
+ return nil
+ }
+
+ // Check boundaries are monotonic.
+ i := h.Boundaries[0]
+ for _, j := range h.Boundaries[1:] {
+ if i >= j {
+ return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
+ }
+ i = j
+ }
+
+ return nil
+}
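+
+// For example, Boundaries of []float64{0, 10, 5} fail this check because
+// 10 >= 5 breaks monotonicity, while []float64{0, 5, 10} passes.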
+
+// copy returns a deep copy of h.
+func (h AggregationExplicitBucketHistogram) copy() Aggregation {
+ return AggregationExplicitBucketHistogram{
+ Boundaries: slices.Clone(h.Boundaries),
+ NoMinMax: h.NoMinMax,
+ }
+}
+
+// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a
+// set of measurements as a histogram with bucket widths that grow
+// exponentially.
+type AggregationBase2ExponentialHistogram struct {
+ // MaxSize is the maximum number of buckets to use for the histogram.
+ MaxSize int32
+ // MaxScale is the maximum resolution scale to use for the histogram.
+ //
+ // MaxScale has a maximum value of 20. Using a value of 20 means the
+ // maximum number of buckets that can fit within the range of a
+ // signed 32-bit integer index could be used.
+ //
+ // MaxScale has a minimum value of -10. Using a value of -10 means only
+ // two buckets will be used.
+ MaxScale int32
+
+ // NoMinMax disables recording the min and max of the distribution. By
+ // default, these extrema are recorded.
+ //
+ // Recording these extrema for cumulative data is expected to have little
+ // value; they will represent the entire life of the instrument instead of
+ // just the current collection cycle. It is recommended to set this to true
+ // for that type of data to avoid computing the low-value extrema.
+ NoMinMax bool
+}
+
+var _ Aggregation = AggregationBase2ExponentialHistogram{}
+
+// copy returns a deep copy of the Aggregation.
+func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
+ return e
+}
+
+const (
+ expoMaxScale = 20
+ expoMinScale = -10
+)
+
+// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
+var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
+
+// err returns an error for any misconfigured Aggregation.
+func (e AggregationBase2ExponentialHistogram) err() error {
+ if e.MaxScale > expoMaxScale {
+ return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale)
+ }
+ if e.MaxSize <= 0 {
+ return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
new file mode 100644
index 000000000..63b88f086
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "sync"
+)
+
+// cache is a locking storage used to quickly return already computed values.
+//
+// The zero value of a cache is empty and ready to use.
+//
+// A cache must not be copied after first use.
+//
+// All methods of a cache are safe to call concurrently.
+type cache[K comparable, V any] struct {
+ sync.Mutex
+ data map[K]V
+}
+
+// Lookup returns the value stored in the cache with the associated key if it
+// exists. Otherwise, f is called and its returned value is set in the cache
+// for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cache lock, so f
+// should not block excessively.
+func (c *cache[K, V]) Lookup(key K, f func() V) V {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.data == nil {
+ val := f()
+ c.data = map[K]V{key: val}
+ return val
+ }
+ if v, ok := c.data[key]; ok {
+ return v
+ }
+ val := f()
+ c.data[key] = val
+ return val
+}
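+
+// A minimal usage sketch (the key and constructor below are hypothetical):
+// repeated lookups with the same key return the memoized value without
+// re-invoking f.
+//
+//	var c cache[string, int]
+//	v := c.Lookup("answer", func() int { return 42 }) // computes and stores 42
+//	v = c.Lookup("answer", func() int { return 0 })   // returns the cached 42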
+
+// HasKey returns true if Lookup has previously been called with that key.
+//
+// HasKey is safe to call concurrently.
+func (c *cache[K, V]) HasKey(key K) bool {
+ c.Lock()
+ defer c.Unlock()
+ _, ok := c.data[key]
+ return ok
+}
+
+// cacheWithErr is a locking storage used to quickly return already computed values and an error.
+//
+// The zero value of a cacheWithErr is empty and ready to use.
+//
+// A cacheWithErr must not be copied after first use.
+//
+// All methods of a cacheWithErr are safe to call concurrently.
+type cacheWithErr[K comparable, V any] struct {
+ cache[K, valAndErr[V]]
+}
+
+type valAndErr[V any] struct {
+ val V
+ err error
+}
+
+// Lookup returns the value stored in the cacheWithErr with the associated key
+// if it exists. Otherwise, f is called and its returned value is set in the
+// cacheWithErr for key and returned.
+//
+// Lookup is safe to call concurrently. It will hold the cacheWithErr lock, so f
+// should not block excessively.
+func (c *cacheWithErr[K, V]) Lookup(key K, f func() (V, error)) (V, error) {
+ combined := c.cache.Lookup(key, func() valAndErr[V] {
+ val, err := f()
+ return valAndErr[V]{val: val, err: err}
+ })
+ return combined.val, combined.err
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
new file mode 100644
index 000000000..c6440a134
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -0,0 +1,206 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/sdk/metric/exemplar"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// config contains configuration options for a MeterProvider.
+type config struct {
+ res *resource.Resource
+ readers []Reader
+ views []View
+ exemplarFilter exemplar.Filter
+ cardinalityLimit int
+}
+
+const defaultCardinalityLimit = 0
+
+// readerSignals returns the force-flush and shutdown functions for a
+// MeterProvider to call in its respective operations. The force-flush and
+// shutdown methods of all Readers c contains are unified into the single
+// returned functions.
+func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
+ var fFuncs, sFuncs []func(context.Context) error
+ for _, r := range c.readers {
+ sFuncs = append(sFuncs, r.Shutdown)
+ if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok {
+ fFuncs = append(fFuncs, f.ForceFlush)
+ }
+ }
+
+ return unify(fFuncs), unifyShutdown(sFuncs)
+}
+
+// unify combines calls to all of funcs into a single function call. All
+// errors returned from calls to funcs are joined into a single returned
+// error value.
+func unify(funcs []func(context.Context) error) func(context.Context) error {
+ return func(ctx context.Context) error {
+ var err error
+ for _, f := range funcs {
+ if e := f(ctx); e != nil {
+ err = errors.Join(err, e)
+ }
+ }
+ return err
+ }
+}
+
+// unifyShutdown unifies calling all of funcs once for a shutdown. If called
+// more than once, an ErrReaderShutdown error is returned.
+func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
+ f := unify(funcs)
+ var once sync.Once
+ return func(ctx context.Context) error {
+ err := ErrReaderShutdown
+ once.Do(func() { err = f(ctx) })
+ return err
+ }
+}
+
+// newConfig returns a config configured with options.
+func newConfig(options []Option) config {
+ conf := config{
+ res: resource.Default(),
+ exemplarFilter: exemplar.TraceBasedFilter,
+ cardinalityLimit: cardinalityLimitFromEnv(),
+ }
+ for _, o := range meterProviderOptionsFromEnv() {
+ conf = o.apply(conf)
+ }
+ for _, o := range options {
+ conf = o.apply(conf)
+ }
+ return conf
+}
+
+// Option applies a configuration option value to a MeterProvider.
+type Option interface {
+ apply(config) config
+}
+
+// optionFunc applies a set of options to a config.
+type optionFunc func(config) config
+
+// apply returns a config with option(s) applied.
+func (o optionFunc) apply(conf config) config {
+ return o(conf)
+}
+
+// WithResource associates a Resource with a MeterProvider. This Resource
+// represents the entity producing telemetry and is associated with all Meters
+// the MeterProvider will create.
+//
+// By default, if this Option is not used, the default Resource from the
+// go.opentelemetry.io/otel/sdk/resource package will be used.
+func WithResource(res *resource.Resource) Option {
+ return optionFunc(func(conf config) config {
+ var err error
+ conf.res, err = resource.Merge(resource.Environment(), res)
+ if err != nil {
+ otel.Handle(err)
+ }
+ return conf
+ })
+}
+
+// WithReader associates Reader r with a MeterProvider.
+//
+// By default, if this option is not used, the MeterProvider will perform no
+// operations; no data will be exported without a Reader.
+func WithReader(r Reader) Option {
+ return optionFunc(func(cfg config) config {
+ if r == nil {
+ return cfg
+ }
+ cfg.readers = append(cfg.readers, r)
+ return cfg
+ })
+}
+
+// WithView associates views with a MeterProvider.
+//
+// Views are appended to existing ones in a MeterProvider if this option is
+// used multiple times.
+//
+// By default, if this option is not used, the MeterProvider will use the
+// default view.
+func WithView(views ...View) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.views = append(cfg.views, views...)
+ return cfg
+ })
+}
+
+// WithExemplarFilter configures the exemplar filter.
+//
+// The exemplar filter determines which measurements are offered to the
+// exemplar reservoir, but the exemplar reservoir makes the final decision of
+// whether to store an exemplar.
+//
+// By default, the [exemplar.TraceBasedFilter] is used. Exemplars can be
+// entirely disabled by providing the [exemplar.AlwaysOffFilter].
+func WithExemplarFilter(filter exemplar.Filter) Option {
+ return optionFunc(func(cfg config) config {
+ cfg.exemplarFilter = filter
+ return cfg
+ })
+}
+
+// WithCardinalityLimit sets the cardinality limit for the MeterProvider.
+//
+// The cardinality limit is the hard limit on the number of metric datapoints
+// that can be collected for a single instrument in a single collect cycle.
+//
+// Setting this to a zero or negative value means no limit is applied.
+func WithCardinalityLimit(limit int) Option {
+ // For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT`
+ // can also be used to set this value.
+ return optionFunc(func(cfg config) config {
+ cfg.cardinalityLimit = limit
+ return cfg
+ })
+}
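+
+// Example (illustrative sketch): combining options when constructing a
+// MeterProvider. The manual reader and the limit of 2000 are assumptions.
+//
+//	reader := NewManualReader()
+//	mp := NewMeterProvider(
+//		WithReader(reader),
+//		WithCardinalityLimit(2000),
+//	)
+//	defer func() { _ = mp.Shutdown(context.Background()) }()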
+
+func meterProviderOptionsFromEnv() []Option {
+ var opts []Option
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar
+ const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER"
+
+ switch strings.ToLower(strings.TrimSpace(os.Getenv(filterEnvKey))) {
+ case "always_on":
+ opts = append(opts, WithExemplarFilter(exemplar.AlwaysOnFilter))
+ case "always_off":
+ opts = append(opts, WithExemplarFilter(exemplar.AlwaysOffFilter))
+ case "trace_based":
+ opts = append(opts, WithExemplarFilter(exemplar.TraceBasedFilter))
+ }
+ return opts
+}
+
+func cardinalityLimitFromEnv() int {
+ const cardinalityLimitKey = "OTEL_GO_X_CARDINALITY_LIMIT"
+ v := strings.TrimSpace(os.Getenv(cardinalityLimitKey))
+ if v == "" {
+ return defaultCardinalityLimit
+ }
+ n, err := strconv.Atoi(v)
+ if err != nil {
+ otel.Handle(err)
+ return defaultCardinalityLimit
+ }
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
new file mode 100644
index 000000000..0f3b9d623
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -0,0 +1,71 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package metric provides an implementation of the OpenTelemetry metrics SDK.
+//
+// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
+// about the concept of OpenTelemetry metrics and
+// https://opentelemetry.io/docs/concepts/components/ for more information
+// about OpenTelemetry SDKs.
+//
+// The entry point for the metric package is the MeterProvider. It is the
+// object that all API calls use to create Meters, instruments, and ultimately
+// make metric measurements. Also, it is an object that should be used to
+// control the life-cycle (start, flush, and shutdown) of the SDK.
+//
+// A MeterProvider needs to be configured to export the measured data; this is
+// done by configuring it with a Reader implementation (using the WithReader
+// MeterProviderOption). Readers take two forms: ones that push to an endpoint
+// (NewPeriodicReader), and ones that an endpoint pulls from. See
+// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
+// or with these Readers.
+//
+// Each Reader, when registered with the MeterProvider, can be augmented with a
+// View. Views allow users that run OpenTelemetry instrumented code to modify
+// the generated data of that instrumentation.
+//
+// The data generated by a MeterProvider needs to include information about its
+// origin. A MeterProvider needs to be configured with a Resource, using the
+// WithResource MeterProviderOption, to include this information. This Resource
+// should be used to describe the unique runtime environment instrumented code
+// is being run on. That way, when multiple instances of the code are collected
+// at a single endpoint, their origin is decipherable.
+//
+// To avoid leaking memory, the SDK returns the same instrument for calls to
+// create new instruments with the same Name, Unit, and Description.
+// Importantly, callbacks provided using metric.WithFloat64Callback or
+// metric.WithInt64Callback will only apply for the first instrument created
+// with a given Name, Unit, and Description. Instead, use
+// Meter.RegisterCallback and Registration.Unregister to add and remove
+// callbacks without leaking memory.
+//
+// # Cardinality Limits
+//
+// Cardinality refers to the number of unique attribute sets collected. High
+// cardinality can lead to excessive memory usage, increased storage costs,
+// and backend performance issues.
+//
+// Currently, the OpenTelemetry Go Metric SDK does not enforce a cardinality limit by default
+// (note that this may change in a future release). Use [WithCardinalityLimit] to set the
+// cardinality limit as desired.
+//
+// New attribute sets are dropped when the cardinality limit is reached. The
+// measurements for these sets are aggregated into a special attribute set
+// containing attribute.Bool("otel.metric.overflow", true). This ensures total
+// metric values (e.g., Sum, Count) remain correct for the collection cycle,
+// but information about the specific dropped sets is not preserved.
+//
+// Recommendations:
+//
+// - Set the limit based on the theoretical maximum combinations or expected
+// active combinations. The OpenTelemetry Specification recommends a default of 2000.
+// - Too high a limit increases worst-case memory overhead in the SDK and may cause downstream
+// issues for databases that cannot handle high cardinality.
+// - Too low a limit causes loss of attribute detail as more data falls into overflow.
+//
+// See [go.opentelemetry.io/otel/metric] for more information about
+// the metric API.
+//
+// See [go.opentelemetry.io/otel/sdk/metric/internal/x] for information about
+// the experimental features.
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/env.go b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
new file mode 100644
index 000000000..a6c403797
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/env.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "os"
+ "strconv"
+ "time"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+ // The time interval (in milliseconds) between the start of two export attempts.
+ envInterval = "OTEL_METRIC_EXPORT_INTERVAL"
+ // Maximum allowed time (in milliseconds) to export data.
+ envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT"
+)
+
+// envDuration returns an environment variable's value, interpreted as a
+// duration in milliseconds, or defaultValue if the environment variable is
+// not defined or its value is not valid.
+func envDuration(key string, defaultValue time.Duration) time.Duration {
+ v := os.Getenv(key)
+ if v == "" {
+ return defaultValue
+ }
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "environment variable", key, "value", v)
+ return defaultValue
+ }
+ if d <= 0 {
+ global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v)
+ return defaultValue
+ }
+ return time.Duration(d) * time.Millisecond
+}
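+
+// A usage sketch: a periodic reader can resolve its export interval with a
+// fallback default this way (the 60-second default is an assumption here).
+//
+//	interval := envDuration(envInterval, 60*time.Second)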
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
new file mode 100644
index 000000000..38b8745e6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "runtime"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+// ExemplarReservoirProviderSelector selects the
+// [exemplar.ReservoirProvider] to use
+// based on the [Aggregation] of the metric.
+type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvider
+
+// reservoirFunc returns the appropriately configured exemplar reservoir
+// creation func based on the passed InstrumentKind and filter configuration.
+func reservoirFunc[N int64 | float64](
+ provider exemplar.ReservoirProvider,
+ filter exemplar.Filter,
+) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] {
+ return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] {
+ return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs))
+ }
+}
+
+// DefaultExemplarReservoirProviderSelector returns the default
+// [exemplar.ReservoirProvider] for the
+// provided [Aggregation].
+//
+// For explicit bucket histograms with more than 1 bucket, it uses the
+// [exemplar.HistogramReservoirProvider].
+// For exponential histograms, it uses the
+// [exemplar.FixedSizeReservoirProvider]
+// with a size of min(20, max_buckets).
+// For all other aggregations, it uses the
+// [exemplar.FixedSizeReservoirProvider]
+// with a size equal to the number of CPUs.
+//
+// Exemplar default reservoirs MAY change in a minor version bump. No
+// guarantees are made on the shape or statistical properties of returned
+// exemplars.
+func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
+ // Explicit bucket histogram aggregation with more than 1 bucket will
+ // use AlignedHistogramBucketExemplarReservoir.
+ a, ok := agg.(AggregationExplicitBucketHistogram)
+ if ok && len(a.Boundaries) > 0 {
+ return exemplar.HistogramReservoirProvider(a.Boundaries)
+ }
+
+ var n int
+ if a, ok := agg.(AggregationBase2ExponentialHistogram); ok {
+ // Base2 Exponential Histogram Aggregation SHOULD use a
+ // SimpleFixedSizeExemplarReservoir with a reservoir equal to the
+ // smaller of the maximum number of buckets configured on the
+ // aggregation or twenty (e.g. min(20, max_buckets)).
+ n = min(int(a.MaxSize), 20)
+ } else {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
+ // This Exemplar reservoir MAY take a configuration parameter for
+ // the size of the reservoir. If no size configuration is
+ // provided, the default size MAY be the number of possible
+ // concurrent threads (e.g. number of CPUs) to help reduce
+ // contention. Otherwise, a default size of 1 SHOULD be used.
+ //
+ // Use runtime.GOMAXPROCS instead of runtime.NumCPU to support
+ // containerized environments that may have less than the total number
+ // of logical CPUs available on the local machine allocated to it.
+ n = max(runtime.GOMAXPROCS(0), 1)
+ }
+
+ return exemplar.FixedSizeReservoirProvider(n)
+}
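+
+// Example (illustrative sketch): a custom ExemplarReservoirProviderSelector
+// that ignores the aggregation and always uses a small fixed-size reservoir.
+// The size of 4 and the instrument name are assumptions; the selector is set
+// on a Stream through a View.
+//
+//	var sel ExemplarReservoirProviderSelector = func(Aggregation) exemplar.ReservoirProvider {
+//		return exemplar.FixedSizeReservoirProvider(4)
+//	}
+//	view := NewView(Instrument{Name: "request.duration"},
+//		Stream{ExemplarReservoirProviderSelector: sel})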
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md
new file mode 100644
index 000000000..d1025f5eb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md
@@ -0,0 +1,3 @@
+# Metric SDK Exemplars
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/exemplar)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go
new file mode 100644
index 000000000..9f2389376
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go
@@ -0,0 +1,6 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package exemplar provides an implementation of the OpenTelemetry exemplar
+// reservoir to be used in metric collection pipelines.
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go
new file mode 100644
index 000000000..1ab694678
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Exemplar is a measurement sampled from a timeseries providing a typical
+// example.
+type Exemplar struct {
+ // FilteredAttributes are the attributes recorded with the measurement but
+ // filtered out of the timeseries' aggregated data.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was recorded.
+ Time time.Time
+ // Value is the measured value.
+ Value Value
+ // SpanID is the ID of the span that was active during the measurement. If
+ // no span was active or the span was not sampled this will be empty.
+ SpanID []byte `json:",omitempty"`
+ // TraceID is the ID of the trace the active span belonged to during the
+ // measurement. If no span was active or the span was not sampled this will
+ // be empty.
+ TraceID []byte `json:",omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go
new file mode 100644
index 000000000..b50f5c153
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Filter determines if a measurement should be offered.
+//
+// The passed ctx needs to contain any baggage or span that were active
+// when the measurement was made. This information may be used by the
+// Reservoir in making a sampling decision.
+type Filter func(context.Context) bool
+
+// TraceBasedFilter is a [Filter] that will only offer measurements
+// if the passed context associated with the measurement contains a sampled
+// [go.opentelemetry.io/otel/trace.SpanContext].
+func TraceBasedFilter(ctx context.Context) bool {
+ return trace.SpanContextFromContext(ctx).IsSampled()
+}
+
+// AlwaysOnFilter is a [Filter] that always offers measurements.
+func AlwaysOnFilter(context.Context) bool {
+ return true
+}
+
+// AlwaysOffFilter is a [Filter] that never offers measurements.
+func AlwaysOffFilter(context.Context) bool {
+ return false
+}
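+
+// Example (illustrative sketch): a custom Filter can compose the provided
+// ones, e.g. offering all measurements while a hypothetical debug toggle is
+// set and falling back to trace-based sampling otherwise.
+//
+//	var debug atomic.Bool // assumed toggle, e.g. flipped by a debug endpoint
+//
+//	var debugOrTraceFilter Filter = func(ctx context.Context) bool {
+//		return debug.Load() || TraceBasedFilter(ctx)
+//	}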
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
new file mode 100644
index 000000000..08e8f68fe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
@@ -0,0 +1,197 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import (
+ "context"
+ "math"
+ "math/rand/v2"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir].
+func FixedSizeReservoirProvider(k int) ReservoirProvider {
+ return func(attribute.Set) Reservoir {
+ return NewFixedSizeReservoir(k)
+ }
+}
+
+// NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most
+// k exemplars. If there are k or fewer measurements made, the Reservoir will
+// sample each one. If there are more than k, the Reservoir will then randomly
+// sample each additional measurement with a decreasing probability.
+func NewFixedSizeReservoir(k int) *FixedSizeReservoir {
+ return newFixedSizeReservoir(newStorage(k))
+}
+
+var _ Reservoir = &FixedSizeReservoir{}
+
+// FixedSizeReservoir is a [Reservoir] that samples at most k exemplars. If
+// there are k or fewer measurements made, the Reservoir will sample each one.
+// If there are more than k, the Reservoir will then randomly sample each
+// additional measurement with a decreasing probability.
+type FixedSizeReservoir struct {
+ *storage
+
+ // count is the number of measurements seen.
+ count int64
+ // next is the next count that will store a measurement at a random index
+ // once the reservoir has been filled.
+ next int64
+ // w is the largest random number in a distribution that is used to compute
+ // the next value of next.
+ w float64
+}
+
+func newFixedSizeReservoir(s *storage) *FixedSizeReservoir {
+ r := &FixedSizeReservoir{
+ storage: s,
+ }
+ r.reset()
+ return r
+}
+
+// randomFloat64 returns, as a float64, a uniform pseudo-random number in the
+// open interval (0.0,1.0).
+func (*FixedSizeReservoir) randomFloat64() float64 {
+ // TODO: Use an algorithm that avoids rejection sampling. For example:
+ //
+ // const precision = 1 << 53 // 2^53
+ // // Generate an integer in [1, 2^53 - 1]
+ // v := rand.Uint64() % (precision - 1) + 1
+ // return float64(v) / float64(precision)
+ f := rand.Float64()
+ for f == 0 {
+ f = rand.Float64()
+ }
+ return f
+}
+
+// Offer accepts the parameters associated with a measurement. The
+// parameters will be stored as an exemplar if the Reservoir decides to
+// sample the measurement.
+//
+// The passed ctx needs to contain any baggage or span that were active
+// when the measurement was made. This information may be used by the
+// Reservoir in making a sampling decision.
+//
+// The time t is the time when the measurement was made. The n and a
+// parameters are the value and dropped (filtered) attributes of the
+// measurement respectively.
+func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) {
+ // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December
+ // 1994). "Reservoir-Sampling Algorithms of Time Complexity
+ // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4):
+ // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435).
+ //
+ // A high-level overview of "Algorithm L":
+ // 0) Pre-calculate the random count greater than the storage size at
+ // which an exemplar will be replaced.
+ // 1) Accept all measurements offered until the configured storage size is
+ // reached.
+ // 2) Loop:
+ // a) When the pre-calculated count is reached, replace a random
+ // existing exemplar with the offered measurement.
+ // b) Calculate the next random count greater than the existing one
+ // at which another exemplar will be replaced.
+ //
+ // The way a "replacement" count is computed is by looking at `n`
+ // independent random numbers, each corresponding to an offered
+ // measurement. Of these numbers, the smallest `k` (the same size as the
+ // storage capacity) are kept as a subset. The maximum value in this
+ // subset, called `w`, is used to weight the random number generation for
+ // the next count that will be considered.
+ //
+ // Weighting the next count computation this way makes it possible to
+ // perform a uniformly-weighted sampling algorithm based on the number of
+ // samples the reservoir has seen so far. The sampling will "slow down" as
+ // more and more samples are offered so as to reduce a bias towards those
+ // offered just prior to the end of the collection.
+ //
+ // This algorithm is preferred because of its balance of simplicity and
+ // performance. It will compute three random numbers (the bulk of
+ // computation time) for each item that becomes part of the reservoir, but
+ // it does not spend any time on items that do not. In particular it has an
+ // asymptotic runtime of O(k(1 + log(n/k))) where n is the number of
+ // measurements offered and k is the reservoir size.
+ //
+ // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of
+ // this and other reservoir sampling algorithms. See
+ // https://github.com/MrAlias/reservoir-sampling for a performance
+ // comparison of reservoir sampling algorithms.
+
+ if int(r.count) < cap(r.store) {
+ r.store[r.count] = newMeasurement(ctx, t, n, a)
+ } else if r.count == r.next {
+ // Overwrite a random existing measurement with the one offered.
+ idx := int(rand.Int64N(int64(cap(r.store))))
+ r.store[idx] = newMeasurement(ctx, t, n, a)
+ r.advance()
+ }
+ r.count++
+}
+
+// reset resets r to the initial state.
+func (r *FixedSizeReservoir) reset() {
+ // This resets the number of exemplars known.
+ r.count = 0
+ // Random index inserts should only happen after the storage is full.
+ r.next = int64(cap(r.store))
+
+ // Initial random number in the series used to generate r.next.
+ //
+ // This is set before r.advance to reset or initialize the random number
+ // series. Without doing so, w would always be 0 and a new random number
+ // series would never be restarted.
+ //
+ // This maps the uniform random number in (0,1) to a geometric distribution
+ // over the same interval. The mean of the distribution is inversely
+ // proportional to the storage capacity.
+ r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
+
+ r.advance()
+}
+
+// advance updates the count at which the offered measurement will overwrite an
+// existing exemplar.
+func (r *FixedSizeReservoir) advance() {
+ // Calculate the next value in the random number series.
+ //
+ // The current value of r.w is based on the max of a distribution of random
+ // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity
+ // of the storage and each `u` in the interval (0,w)). To calculate the
+ // next r.w we use the fact that when the next exemplar is selected to be
+ // included in the storage an existing one will be dropped, and the
+ // corresponding random number in the set used to calculate r.w will also
+ // be replaced. The replacement random number will also be within (0,w),
+ // therefore the next r.w will be based on the same distribution (i.e.
+ // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
+ // computing the next random number `u` and take r.w as `w * u^(1/k)`.
+ r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
+ // Use the new random number in the series to calculate the count of the
+ // next measurement that will be stored.
+ //
+ // Given 0 < r.w < 1, each iteration will result in subsequent r.w being
+ // smaller. This translates here into the next value of next being selected
+ // against a distribution with a higher mean (i.e. the expected value will
+ // increase and replacements become less likely).
+ //
+ // Importantly, the new r.next will always be at least 1 more than
+ // the last r.next.
+ r.next += int64(math.Log(r.randomFloat64())/math.Log(1-r.w)) + 1
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) {
+ r.storage.Collect(dest)
+ // Call reset here even though it will reset r.count and restart the random
+ // number series. This will persist any old exemplars as long as no new
+ // measurements are offered, but it will also prioritize those new
+ // measurements that are made over the older collection cycle ones.
+ r.reset()
+}
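+
+// A usage sketch (values are arbitrary): offering more measurements than the
+// capacity leaves a uniform sample of them in the reservoir.
+//
+//	r := NewFixedSizeReservoir(2)
+//	for i := int64(0); i < 10; i++ {
+//		r.Offer(context.Background(), time.Now(), NewValue(i), nil)
+//	}
+//	var dest []Exemplar
+//	r.Collect(&dest) // len(dest) == 2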
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
new file mode 100644
index 000000000..decab613e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import (
+ "context"
+ "slices"
+ "sort"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// HistogramReservoirProvider is a provider of [HistogramReservoir].
+func HistogramReservoirProvider(bounds []float64) ReservoirProvider {
+ cp := slices.Clone(bounds)
+ slices.Sort(cp)
+ return func(attribute.Set) Reservoir {
+ return NewHistogramReservoir(cp)
+ }
+}
+
+// NewHistogramReservoir returns a [HistogramReservoir] that samples the last
+// measurement that falls within a histogram bucket. The histogram bucket
+// upper-boundaries are defined by bounds.
+//
+// The passed bounds must be sorted before calling this function.
+func NewHistogramReservoir(bounds []float64) *HistogramReservoir {
+ return &HistogramReservoir{
+ bounds: bounds,
+ storage: newStorage(len(bounds) + 1),
+ }
+}
+
+var _ Reservoir = &HistogramReservoir{}
+
+// HistogramReservoir is a [Reservoir] that samples the last measurement that
+// falls within a histogram bucket. The histogram bucket upper-boundaries are
+// defined by bounds.
+type HistogramReservoir struct {
+ *storage
+
+ // bounds are bucket bounds in ascending order.
+ bounds []float64
+}
+
+// Offer accepts the parameters associated with a measurement. The
+// parameters will be stored as an exemplar if the Reservoir decides to
+// sample the measurement.
+//
+// The passed ctx needs to contain any baggage or span that were active
+// when the measurement was made. This information may be used by the
+// Reservoir in making a sampling decision.
+//
+// The time t is the time when the measurement was made. The v and a
+// parameters are the value and dropped (filtered) attributes of the
+// measurement respectively.
+func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
+ var x float64
+ switch v.Type() {
+ case Int64ValueType:
+ x = float64(v.Int64())
+ case Float64ValueType:
+ x = v.Float64()
+ default:
+ panic("unknown value type")
+ }
+ r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
+}
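+
+// A usage sketch (bounds and values are arbitrary): each offered measurement
+// overwrites the exemplar held for the bucket it falls in.
+//
+//	r := NewHistogramReservoir([]float64{10, 100})
+//	r.Offer(context.Background(), time.Now(), NewValue(7.0), nil)   // bucket (-inf, 10]
+//	r.Offer(context.Background(), time.Now(), NewValue(250.0), nil) // bucket (100, +inf)
+//	var dest []Exemplar
+//	r.Collect(&dest) // two exemplars, one per non-empty bucket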
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go
new file mode 100644
index 000000000..ba5cd1a6b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Reservoir holds the sampled exemplar of measurements made.
+type Reservoir interface {
+ // Offer accepts the parameters associated with a measurement. The
+ // parameters will be stored as an exemplar if the Reservoir decides to
+ // sample the measurement.
+ //
+ // The passed ctx needs to contain any baggage or span that were active
+ // when the measurement was made. This information may be used by the
+ // Reservoir in making a sampling decision.
+ //
+ // The time t is the time when the measurement was made. The val and attr
+ // parameters are the value and dropped (filtered) attributes of the
+ // measurement respectively.
+ Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue)
+
+ // Collect returns all the held exemplars.
+ //
+ // The Reservoir state is preserved after this call.
+ Collect(dest *[]Exemplar)
+}
+
+// ReservoirProvider creates new [Reservoir]s.
+//
+// The attributes provided are the attributes kept by the aggregation; they
+// are disjoint from the attributes passed to Offer. The combination of these
+// attributes and the attributes passed to Offer is the complete set of
+// attributes a measurement was made with.
+type ReservoirProvider func(attr attribute.Set) Reservoir
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go
new file mode 100644
index 000000000..0e2e26dfb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// storage is an exemplar storage for [Reservoir] implementations.
+type storage struct {
+ // store are the measurements sampled.
+ //
+ // This does not use []metricdata.Exemplar because it potentially would
+ // require an allocation for trace and span IDs in the hot path of Offer.
+ store []measurement
+}
+
+func newStorage(n int) *storage {
+ return &storage{store: make([]measurement, n)}
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *storage) Collect(dest *[]Exemplar) {
+ *dest = reset(*dest, len(r.store), len(r.store))
+ var n int
+ for _, m := range r.store {
+ if !m.valid {
+ continue
+ }
+
+ m.exemplar(&(*dest)[n])
+ n++
+ }
+ *dest = (*dest)[:n]
+}
+
+// measurement is a measurement made by a telemetry system.
+type measurement struct {
+ // FilteredAttributes are the attributes dropped during the measurement.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was made.
+ Time time.Time
+ // Value is the value of the measurement.
+ Value Value
+ // SpanContext is the SpanContext active when a measurement was made.
+ SpanContext trace.SpanContext
+
+ valid bool
+}
+
+// newMeasurement returns a new non-empty Measurement.
+func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement {
+ return measurement{
+ FilteredAttributes: droppedAttr,
+ Time: ts,
+ Value: v,
+ SpanContext: trace.SpanContextFromContext(ctx),
+ valid: true,
+ }
+}
+
+// exemplar returns m as an [Exemplar].
+func (m measurement) exemplar(dest *Exemplar) {
+ dest.FilteredAttributes = m.FilteredAttributes
+ dest.Time = m.Time
+ dest.Value = m.Value
+
+ if m.SpanContext.HasTraceID() {
+ traceID := m.SpanContext.TraceID()
+ dest.TraceID = traceID[:]
+ } else {
+ dest.TraceID = dest.TraceID[:0]
+ }
+
+ if m.SpanContext.HasSpanID() {
+ spanID := m.SpanContext.SpanID()
+ dest.SpanID = spanID[:]
+ } else {
+ dest.SpanID = dest.SpanID[:0]
+ }
+}
+
+func reset[T any](s []T, length, capacity int) []T {
+ if cap(s) < capacity {
+ return make([]T, length, capacity)
+ }
+ return s[:length]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go
new file mode 100644
index 000000000..590b089a8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import "math"
+
+// ValueType identifies the type of value used in exemplar data.
+type ValueType uint8
+
+const (
+ // UnknownValueType should not be used. It represents a misconfigured
+ // Value.
+ UnknownValueType ValueType = 0
+ // Int64ValueType represents a Value with int64 data.
+ Int64ValueType ValueType = 1
+ // Float64ValueType represents a Value with float64 data.
+ Float64ValueType ValueType = 2
+)
+
+// Value is the value of data held by an exemplar.
+type Value struct {
+ t ValueType
+ val uint64
+}
+
+// NewValue returns a new [Value] for the provided value.
+func NewValue[N int64 | float64](value N) Value {
+ switch v := any(value).(type) {
+ case int64:
+ // This can be later converted back to int64 (overflow not checked).
+ return Value{t: Int64ValueType, val: uint64(v)} // nolint:gosec
+ case float64:
+ return Value{t: Float64ValueType, val: math.Float64bits(v)}
+ }
+ return Value{}
+}
+
+// Type returns the [ValueType] of data held by v.
+func (v Value) Type() ValueType { return v.t }
+
+// Int64 returns the value of v as an int64. If the ValueType of v is not an
+// Int64ValueType, 0 is returned.
+func (v Value) Int64() int64 {
+ if v.t == Int64ValueType {
+ // Assumes the correct int64 was stored in v.val based on type.
+ return int64(v.val) // nolint: gosec
+ }
+ return 0
+}
+
+// Float64 returns the value of v as a float64. If the ValueType of v is not
+// a Float64ValueType, 0 is returned.
+func (v Value) Float64() float64 {
+ if v.t == Float64ValueType {
+ return math.Float64frombits(v.val)
+ }
+ return 0
+}
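+
+// A usage sketch: a Value reports zero for accessors that do not match its
+// type.
+//
+//	v := NewValue(int64(42))
+//	_ = v.Type()    // Int64ValueType
+//	_ = v.Int64()   // 42
+//	_ = v.Float64() // 0, v does not hold a float64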
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
new file mode 100644
index 000000000..1969cb42c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ErrExporterShutdown is returned if Export or Shutdown are called after an
+// Exporter has been Shutdown.
+var ErrExporterShutdown = errors.New("exporter is shutdown")
+
+// Exporter handles the delivery of metric data to external receivers. This is
+// the final component in the metric push pipeline.
+type Exporter interface {
+ // Temporality returns the Temporality to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Exporter methods.
+ Temporality(InstrumentKind) metricdata.Temporality
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Aggregation returns the Aggregation to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Exporter methods.
+ Aggregation(InstrumentKind) Aggregation
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Export serializes and transmits metric data to a receiver.
+ //
+ // This is called synchronously; there is no concurrency safety
+ // requirement. Because of this, it is critical that all timeouts and
+ // cancellations of the passed context be honored.
+ //
+ // All retry logic must be contained in this function. The SDK does not
+ // implement any retry logic. All errors returned by this function are
+ // considered unrecoverable and will be reported to a configured error
+ // Handler.
+ //
+ // The passed ResourceMetrics may be reused when the call completes. If an
+ // exporter needs to hold this data after it returns, it needs to make a
+ // copy.
+ Export(context.Context, *metricdata.ResourceMetrics) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush flushes any metric data held by an exporter.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // This method needs to be concurrent safe.
+ ForceFlush(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown flushes all metric data held by an exporter and releases any
+ // held computational resources.
+ //
+ // The deadline or cancellation of the passed context must be honored. An
+ // appropriate error should be returned in these situations.
+ //
+ // After Shutdown is called, calls to Export will perform no operation and
+ // instead will return an error indicating the shutdown state.
+ //
+ // This method needs to be concurrent safe.
+ Shutdown(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
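+
+// Example (illustrative sketch): the shape of a minimal no-op Exporter. A
+// real exporter must also honor context deadlines and contain its own retry
+// logic in Export. The type name is hypothetical.
+//
+//	type noopExporter struct{}
+//
+//	func (noopExporter) Temporality(InstrumentKind) metricdata.Temporality {
+//		return metricdata.CumulativeTemporality
+//	}
+//	func (noopExporter) Aggregation(k InstrumentKind) Aggregation { return DefaultAggregationSelector(k) }
+//	func (noopExporter) Export(context.Context, *metricdata.ResourceMetrics) error { return nil }
+//	func (noopExporter) ForceFlush(context.Context) error { return nil }
+//	func (noopExporter) Shutdown(context.Context) error   { return nil }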
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
new file mode 100644
index 000000000..63cccc508
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -0,0 +1,368 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+ "go.opentelemetry.io/otel/sdk/metric/internal/x"
+)
+
+var zeroScope instrumentation.Scope
+
+// InstrumentKind is the identifier of a group of instruments that all
+// perform the same function.
+type InstrumentKind uint8
+
+const (
+ // instrumentKindUndefined is an undefined instrument kind, it should not
+ // be used by any initialized type.
+ instrumentKindUndefined InstrumentKind = 0 // nolint:unused
+ // InstrumentKindCounter identifies a group of instruments that record
+ // increasing values synchronously with the code path they are measuring.
+ InstrumentKindCounter InstrumentKind = 1
+ // InstrumentKindUpDownCounter identifies a group of instruments that
+ // record increasing and decreasing values synchronously with the code path
+ // they are measuring.
+ InstrumentKindUpDownCounter InstrumentKind = 2
+ // InstrumentKindHistogram identifies a group of instruments that record a
+ // distribution of values synchronously with the code path they are
+ // measuring.
+ InstrumentKindHistogram InstrumentKind = 3
+ // InstrumentKindObservableCounter identifies a group of instruments that
+ // record increasing values in an asynchronous callback.
+ InstrumentKindObservableCounter InstrumentKind = 4
+ // InstrumentKindObservableUpDownCounter identifies a group of instruments
+ // that record increasing and decreasing values in an asynchronous
+ // callback.
+ InstrumentKindObservableUpDownCounter InstrumentKind = 5
+ // InstrumentKindObservableGauge identifies a group of instruments that
+ // record current values in an asynchronous callback.
+ InstrumentKindObservableGauge InstrumentKind = 6
+ // InstrumentKindGauge identifies a group of instruments that record
+ // instantaneous values synchronously with the code path they are
+ // measuring.
+ InstrumentKindGauge InstrumentKind = 7
+)
+
+type nonComparable [0]func() // nolint: unused // This is indeed used.
+
+// Instrument describes properties an instrument is created with.
+type Instrument struct {
+ // Name is the human-readable identifier of the instrument.
+ Name string
+ // Description describes the purpose of the instrument.
+ Description string
+ // Kind defines the functional group of the instrument.
+ Kind InstrumentKind
+ // Unit is the unit of measurement recorded by the instrument.
+ Unit string
+ // Scope identifies the instrumentation that created the instrument.
+ Scope instrumentation.Scope
+
+ // Ensure forward compatibility if non-comparable fields need to be added.
+ nonComparable // nolint: unused
+}
+
+// IsEmpty reports whether all Instrument fields are their zero-value.
+func (i Instrument) IsEmpty() bool {
+ return i.Name == "" &&
+ i.Description == "" &&
+ i.Kind == instrumentKindUndefined &&
+ i.Unit == "" &&
+ i.Scope == zeroScope
+}
+
+// matches returns whether all the non-zero-value fields of i match the
+// corresponding fields of other. If i is empty, it matches any other
+// Instrument and true is always returned.
+func (i Instrument) matches(other Instrument) bool {
+ return i.matchesName(other) &&
+ i.matchesDescription(other) &&
+ i.matchesKind(other) &&
+ i.matchesUnit(other) &&
+ i.matchesScope(other)
+}
+
+// matchesName returns true if the Name of i is "" or it equals the Name of
+// other, otherwise false.
+func (i Instrument) matchesName(other Instrument) bool {
+ return i.Name == "" || i.Name == other.Name
+}
+
+// matchesDescription returns true if the Description of i is "" or it equals
+// the Description of other, otherwise false.
+func (i Instrument) matchesDescription(other Instrument) bool {
+ return i.Description == "" || i.Description == other.Description
+}
+
+// matchesKind returns true if the Kind of i is its zero-value or it equals the
+// Kind of other, otherwise false.
+func (i Instrument) matchesKind(other Instrument) bool {
+ return i.Kind == instrumentKindUndefined || i.Kind == other.Kind
+}
+
+// matchesUnit returns true if the Unit of i is its zero-value or it equals the
+// Unit of other, otherwise false.
+func (i Instrument) matchesUnit(other Instrument) bool {
+ return i.Unit == "" || i.Unit == other.Unit
+}
+
+// matchesScope returns true if the Scope of i is its zero-value or it equals
+// the Scope of other, otherwise false.
+func (i Instrument) matchesScope(other Instrument) bool {
+ return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) &&
+ (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) &&
+ (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL)
+}
+
+// Stream describes the stream of data an instrument produces.
+type Stream struct {
+ // Name is the human-readable identifier of the stream.
+ Name string
+ // Description describes the purpose of the data.
+ Description string
+ // Unit is the unit of measurement recorded.
+ Unit string
+ // Aggregation is the aggregation the stream uses for an instrument.
+ Aggregation Aggregation
+ // AttributeFilter is an attribute Filter applied to the attributes
+ // recorded for an instrument's measurement. If the filter returns false
+ // for an attribute, the attribute will not be recorded; if it returns
+ // true, the attribute will be recorded.
+ //
+ // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
+ // provide an allow-list of attribute keys here.
+ AttributeFilter attribute.Filter
+ // ExemplarReservoirProvider selects the
+ // [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based
+ // on the [Aggregation].
+ //
+ // If unspecified, [DefaultExemplarReservoirProviderSelector] is used.
+ ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector
+}
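+
+// Example (illustrative sketch): restricting recorded attributes to an
+// allow-list through a View. The instrument name and keys are assumptions.
+//
+//	view := NewView(
+//		Instrument{Name: "http.server.request.count"},
+//		Stream{AttributeFilter: attribute.NewAllowKeysFilter("http.method", "http.status_code")},
+//	)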
+
+// instID holds the identifying properties of an instrument.
+type instID struct {
+ // Name is the name of the stream.
+ Name string
+ // Description is the description of the stream.
+ Description string
+ // Kind defines the functional group of the instrument.
+ Kind InstrumentKind
+ // Unit is the unit of the stream.
+ Unit string
+ // Number is the number type of the stream.
+ Number string
+}
+
+// Returns a normalized copy of the instID i.
+//
+// Instrument names are considered case-insensitive. Standardize the instrument
+// name to always be lowercase for the returned instID so it can be compared
+// without the name casing affecting the comparison.
+func (i instID) normalize() instID {
+ i.Name = strings.ToLower(i.Name)
+ return i
+}
+
+type int64Inst struct {
+ measures []aggregate.Measure[int64]
+
+ embedded.Int64Counter
+ embedded.Int64UpDownCounter
+ embedded.Int64Histogram
+ embedded.Int64Gauge
+}
+
+var (
+ _ metric.Int64Counter = (*int64Inst)(nil)
+ _ metric.Int64UpDownCounter = (*int64Inst)(nil)
+ _ metric.Int64Histogram = (*int64Inst)(nil)
+ _ metric.Int64Gauge = (*int64Inst)(nil)
+ _ x.EnabledInstrument = (*int64Inst)(nil)
+)
+
+func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
+ c := metric.NewAddConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) {
+ c := metric.NewRecordConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *int64Inst) Enabled(context.Context) bool {
+ return len(i.measures) != 0
+}
+
+func (i *int64Inst) aggregate(
+ ctx context.Context,
+ val int64,
+ s attribute.Set,
+) { // nolint:revive // okay to shadow pkg with method.
+ for _, in := range i.measures {
+ in(ctx, val, s)
+ }
+}
+
+type float64Inst struct {
+ measures []aggregate.Measure[float64]
+
+ embedded.Float64Counter
+ embedded.Float64UpDownCounter
+ embedded.Float64Histogram
+ embedded.Float64Gauge
+}
+
+var (
+ _ metric.Float64Counter = (*float64Inst)(nil)
+ _ metric.Float64UpDownCounter = (*float64Inst)(nil)
+ _ metric.Float64Histogram = (*float64Inst)(nil)
+ _ metric.Float64Gauge = (*float64Inst)(nil)
+ _ x.EnabledInstrument = (*float64Inst)(nil)
+)
+
+func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
+ c := metric.NewAddConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) {
+ c := metric.NewRecordConfig(opts)
+ i.aggregate(ctx, val, c.Attributes())
+}
+
+func (i *float64Inst) Enabled(context.Context) bool {
+ return len(i.measures) != 0
+}
+
+func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
+ for _, in := range i.measures {
+ in(ctx, val, s)
+ }
+}
+
+// observableID is a comparable unique identifier of an observable.
+type observableID[N int64 | float64] struct {
+ name string
+ description string
+ kind InstrumentKind
+ unit string
+ scope instrumentation.Scope
+}
+
+type float64Observable struct {
+ metric.Float64Observable
+ *observable[float64]
+
+ embedded.Float64ObservableCounter
+ embedded.Float64ObservableUpDownCounter
+ embedded.Float64ObservableGauge
+}
+
+var (
+ _ metric.Float64ObservableCounter = float64Observable{}
+ _ metric.Float64ObservableUpDownCounter = float64Observable{}
+ _ metric.Float64ObservableGauge = float64Observable{}
+)
+
+func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string) float64Observable {
+ return float64Observable{
+ observable: newObservable[float64](m, kind, name, desc, u),
+ }
+}
+
+type int64Observable struct {
+ metric.Int64Observable
+ *observable[int64]
+
+ embedded.Int64ObservableCounter
+ embedded.Int64ObservableUpDownCounter
+ embedded.Int64ObservableGauge
+}
+
+var (
+ _ metric.Int64ObservableCounter = int64Observable{}
+ _ metric.Int64ObservableUpDownCounter = int64Observable{}
+ _ metric.Int64ObservableGauge = int64Observable{}
+)
+
+func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int64Observable {
+ return int64Observable{
+ observable: newObservable[int64](m, kind, name, desc, u),
+ }
+}
+
+type observable[N int64 | float64] struct {
+ metric.Observable
+ observableID[N]
+
+ meter *meter
+ measures measures[N]
+ dropAggregation bool
+}
+
+func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] {
+ return &observable[N]{
+ observableID: observableID[N]{
+ name: name,
+ description: desc,
+ kind: kind,
+ unit: u,
+ scope: m.scope,
+ },
+ meter: m,
+ }
+}
+
+// observe records the val for the set of attrs.
+func (o *observable[N]) observe(val N, s attribute.Set) {
+ o.measures.observe(val, s)
+}
+
+func (o *observable[N]) appendMeasures(meas []aggregate.Measure[N]) {
+ o.measures = append(o.measures, meas...)
+}
+
+type measures[N int64 | float64] []aggregate.Measure[N]
+
+// observe records the val for the set of attrs.
+func (m measures[N]) observe(val N, s attribute.Set) {
+ for _, in := range m {
+ in(context.Background(), val, s)
+ }
+}
+
+var errEmptyAgg = errors.New("no aggregators for observable instrument")
+
+// registerable returns an error if the observable o should not be registered,
+// and nil if it should. An errEmptyAgg error is returned if o is effectively a
+// no-op because it does not have any aggregators. An error is also returned
+// if m is a Meter other than the one o was created by.
+func (o *observable[N]) registerable(m *meter) error {
+ if len(o.measures) == 0 {
+ return errEmptyAgg
+ }
+ if m != o.meter {
+ return fmt.Errorf(
+ "invalid registration: observable %q from Meter %q, registered with Meter %q",
+ o.name,
+ o.scope.Name,
+ m.scope.Name,
+ )
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
new file mode 100644
index 000000000..25ea6244e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT.
+
+package metric
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[instrumentKindUndefined-0]
+ _ = x[InstrumentKindCounter-1]
+ _ = x[InstrumentKindUpDownCounter-2]
+ _ = x[InstrumentKindHistogram-3]
+ _ = x[InstrumentKindObservableCounter-4]
+ _ = x[InstrumentKindObservableUpDownCounter-5]
+ _ = x[InstrumentKindObservableGauge-6]
+ _ = x[InstrumentKindGauge-7]
+}
+
+const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge"
+
+var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112}
+
+func (i InstrumentKind) String() string {
+ if i >= InstrumentKind(len(_InstrumentKind_index)-1) {
+ return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
new file mode 100644
index 000000000..0321da681
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// now is used to return the current local time while allowing tests to
+// override the default time.Now function.
+var now = time.Now
+
+// Measure receives measurements to be aggregated.
+type Measure[N int64 | float64] func(context.Context, N, attribute.Set)
+
+// ComputeAggregation stores the aggregate of measurements into dest and
+// returns the number of aggregate data-points output.
+type ComputeAggregation func(dest *metricdata.Aggregation) int
+
+// Builder builds an aggregate function.
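+//
+// As an illustrative sketch (not a prescribed usage pattern), a monotonic
+// cumulative sum aggregate might be obtained as:
+//
+//	b := Builder[int64]{Temporality: metricdata.CumulativeTemporality}
+//	meas, comp := b.Sum(true)
+//
+// where meas receives measurements and comp computes the aggregation.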
+type Builder[N int64 | float64] struct {
+ // Temporality is the temporality used for the returned aggregate function.
+ //
+ // If this is not provided a default of cumulative will be used (except for
+ // the last-value aggregate function where delta is the only appropriate
+ // temporality).
+ Temporality metricdata.Temporality
+ // Filter is the attribute filter the aggregate function will use on the
+ // input of measurements.
+ Filter attribute.Filter
+ // ReservoirFunc is the factory function used by aggregate functions to
+ // create new exemplar reservoirs for a new seen attribute set.
+ //
+ // If this is not provided, a default factory function that returns a
+ // dropReservoir will be used.
+ ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N]
+ // AggregationLimit is the cardinality limit of measurement attributes. Any
+ // measurement for new attributes once the limit has been reached will be
+ // aggregated into a single aggregate for the "otel.metric.overflow"
+ // attribute.
+ //
+ // If AggregationLimit is less than or equal to zero there will not be an
+ // aggregation limit imposed (i.e. unlimited attribute sets).
+ AggregationLimit int
+}
+
+func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] {
+ if b.ReservoirFunc != nil {
+ return b.ReservoirFunc
+ }
+
+ return dropReservoir
+}
+
+type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue)
+
+func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
+ if b.Filter != nil {
+ fltr := b.Filter // Copy to make it immutable after assignment.
+ return func(ctx context.Context, n N, a attribute.Set) {
+ fAttr, dropped := a.Filter(fltr)
+ f(ctx, n, fAttr, dropped)
+ }
+ }
+ return func(ctx context.Context, n N, a attribute.Set) {
+ f(ctx, n, a, nil)
+ }
+}
+
+// LastValue returns a last-value aggregate function input and output.
+func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
+ lv := newLastValue[N](b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(lv.measure), lv.delta
+ default:
+ return b.filter(lv.measure), lv.cumulative
+ }
+}
+
+// PrecomputedLastValue returns a last-value aggregate function input and
+// output. The aggregation returned from the returned ComputeAggregation
+// function will always only return values from the previous collection cycle.
+func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) {
+ lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(lv.measure), lv.delta
+ default:
+ return b.filter(lv.measure), lv.cumulative
+ }
+}
+
+// PrecomputedSum returns a sum aggregate function input and output. The
+// arguments passed to the input are expected to be the precomputed sum values.
+func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
+ s := newPrecomputedSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(s.measure), s.delta
+ default:
+ return b.filter(s.measure), s.cumulative
+ }
+}
+
+// Sum returns a sum aggregate function input and output.
+func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
+ s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(s.measure), s.delta
+ default:
+ return b.filter(s.measure), s.cumulative
+ }
+}
+
+// ExplicitBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExplicitBucketHistogram(
+ boundaries []float64,
+ noMinMax, noSum bool,
+) (Measure[N], ComputeAggregation) {
+ h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(h.measure), h.delta
+ default:
+ return b.filter(h.measure), h.cumulative
+ }
+}
+
+// ExponentialBucketHistogram returns a histogram aggregate function input and
+// output.
+func (b Builder[N]) ExponentialBucketHistogram(
+ maxSize, maxScale int32,
+ noMinMax, noSum bool,
+) (Measure[N], ComputeAggregation) {
+ h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ switch b.Temporality {
+ case metricdata.DeltaTemporality:
+ return b.filter(h.measure), h.delta
+ default:
+ return b.filter(h.measure), h.cumulative
+ }
+}
+
+// reset ensures s has capacity and sets its length. If the capacity of s is
+// too small, a new slice is returned with the specified capacity and length.
+func reset[T any](s []T, length, capacity int) []T {
+ if cap(s) < capacity {
+ return make([]T, length, capacity)
+ }
+ return s[:length]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
new file mode 100644
index 000000000..7b7225e6e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package aggregate provides aggregate types used to compute aggregations and
+// cycle the state of metric measurements made by the SDK. These types and
+// functionality are meant only for internal SDK use.
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go
new file mode 100644
index 000000000..129920cbd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/exemplar"
+)
+
+// dropReservoir returns a [FilteredExemplarReservoir] that drops all measurements it is offered.
+func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N] {
+ return &dropRes[N]{}
+}
+
+type dropRes[N int64 | float64] struct{}
+
+// Offer does nothing, all measurements offered will be dropped.
+func (*dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
+
+// Collect resets dest. No exemplars will ever be returned.
+func (*dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
+ clear(*dest) // Erase elements to let GC collect objects
+ *dest = (*dest)[:0]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
new file mode 100644
index 000000000..25d709948
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "sync"
+
+ "go.opentelemetry.io/otel/sdk/metric/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+var exemplarPool = sync.Pool{
+ New: func() any { return new([]exemplar.Exemplar) },
+}
+
+func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
+ dest := exemplarPool.Get().(*[]exemplar.Exemplar)
+ defer func() {
+ clear(*dest) // Erase elements to let GC collect objects.
+ *dest = (*dest)[:0]
+ exemplarPool.Put(dest)
+ }()
+
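+ // Size the scratch buffer to mirror the output slice, hinting at the
+ // expected number of exemplars to the reservoir's Collect.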
+ *dest = reset(*dest, len(*out), cap(*out))
+
+ f(dest)
+
+ *out = reset(*out, len(*dest), cap(*dest))
+ for i, e := range *dest {
+ (*out)[i].FilteredAttributes = e.FilteredAttributes
+ (*out)[i].Time = e.Time
+ (*out)[i].SpanID = e.SpanID
+ (*out)[i].TraceID = e.TraceID
+
+ switch e.Value.Type() {
+ case exemplar.Int64ValueType:
+ (*out)[i].Value = N(e.Value.Int64())
+ case exemplar.Float64ValueType:
+ (*out)[i].Value = N(e.Value.Float64())
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
new file mode 100644
index 000000000..857eddf30
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -0,0 +1,478 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "errors"
+ "math"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+const (
+ expoMaxScale = 20
+ expoMinScale = -10
+
+ smallestNonZeroNormalFloat64 = 0x1p-1022
+
+ // These redefine the Math constants with a type, so the compiler won't coerce
+ // them into an int on 32 bit platforms.
+ maxInt64 int64 = math.MaxInt64
+ minInt64 int64 = math.MinInt64
+)
+
+// expoHistogramDataPoint is a single data point in an exponential histogram.
+type expoHistogramDataPoint[N int64 | float64] struct {
+ attrs attribute.Set
+ res FilteredExemplarReservoir[N]
+
+ count uint64
+ min N
+ max N
+ sum N
+
+ maxSize int
+ noMinMax bool
+ noSum bool
+
+ scale int32
+
+ posBuckets expoBuckets
+ negBuckets expoBuckets
+ zeroCount uint64
+}
+
+func newExpoHistogramDataPoint[N int64 | float64](
+ attrs attribute.Set,
+ maxSize int,
+ maxScale int32,
+ noMinMax, noSum bool,
+) *expoHistogramDataPoint[N] { // nolint:revive // we need this control flag
+ f := math.MaxFloat64
+ ma := N(f) // if N is int64, max will overflow to -9223372036854775808
+ mi := N(-f)
+ if N(maxInt64) > N(f) {
+ ma = N(maxInt64)
+ mi = N(minInt64)
+ }
+ return &expoHistogramDataPoint[N]{
+ attrs: attrs,
+ min: ma,
+ max: mi,
+ maxSize: maxSize,
+ noMinMax: noMinMax,
+ noSum: noSum,
+ scale: maxScale,
+ }
+}
+
+// record adds a new measurement to the histogram. It will rescale the buckets if needed.
+func (p *expoHistogramDataPoint[N]) record(v N) {
+ p.count++
+
+ if !p.noMinMax {
+ if v < p.min {
+ p.min = v
+ }
+ if v > p.max {
+ p.max = v
+ }
+ }
+ if !p.noSum {
+ p.sum += v
+ }
+
+ absV := math.Abs(float64(v))
+
+ if absV == 0.0 {
+ p.zeroCount++
+ return
+ }
+
+ bin := p.getBin(absV)
+
+ bucket := &p.posBuckets
+ if v < 0 {
+ bucket = &p.negBuckets
+ }
+
+ // If the new bin would make the counts larger than maxSize, we need to
+ // downscale current measurements.
+ if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
+ if p.scale-scaleDelta < expoMinScale {
+ // With a scale of -10 there are only two buckets for the whole range of float64 values.
+ // This can only happen if there is a max size of 1.
+ otel.Handle(errors.New("exponential histogram scale underflow"))
+ return
+ }
+ // Downscale
+ p.scale -= scaleDelta
+ p.posBuckets.downscale(scaleDelta)
+ p.negBuckets.downscale(scaleDelta)
+
+ bin = p.getBin(absV)
+ }
+
+ bucket.record(bin)
+}
+
+// getBin returns the bin v should be recorded into.
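+// For example, at scale 0, v = 6.0 yields frac = 0.75 and exp = 3, so the
+// bin is exp-1 = 2, which covers the range (2^2, 2^3] = (4, 8].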
+func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 {
+ frac, expInt := math.Frexp(v)
+ // 11-bit exponential.
+ exp := int32(expInt) // nolint: gosec
+ if p.scale <= 0 {
+ // Because of the choice of fraction, exp is always 1 power of two higher than we want.
+ var correction int32 = 1
+ if frac == .5 {
+ // If v is an exact power of two the frac will be .5 and the exp
+ // will be one higher than we want.
+ correction = 2
+ }
+ return (exp - correction) >> (-p.scale)
+ }
+ return exp<<p.scale + int32(math.Log(frac)*scaleFactors[p.scale]) - 1
+}
+
+// scaleFactors are constants used in calculating the logarithm index. They
+// are equivalent to 2^index/log(2).
+var scaleFactors = [21]float64{
+ math.Ldexp(math.Log2E, 0),
+ math.Ldexp(math.Log2E, 1),
+ math.Ldexp(math.Log2E, 2),
+ math.Ldexp(math.Log2E, 3),
+ math.Ldexp(math.Log2E, 4),
+ math.Ldexp(math.Log2E, 5),
+ math.Ldexp(math.Log2E, 6),
+ math.Ldexp(math.Log2E, 7),
+ math.Ldexp(math.Log2E, 8),
+ math.Ldexp(math.Log2E, 9),
+ math.Ldexp(math.Log2E, 10),
+ math.Ldexp(math.Log2E, 11),
+ math.Ldexp(math.Log2E, 12),
+ math.Ldexp(math.Log2E, 13),
+ math.Ldexp(math.Log2E, 14),
+ math.Ldexp(math.Log2E, 15),
+ math.Ldexp(math.Log2E, 16),
+ math.Ldexp(math.Log2E, 17),
+ math.Ldexp(math.Log2E, 18),
+ math.Ldexp(math.Log2E, 19),
+ math.Ldexp(math.Log2E, 20),
+}
+
+// scaleChange returns the magnitude of the scale change needed to fit bin in
+// the bucket. If no scale change is needed, 0 is returned.
+func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 {
+ if length == 0 {
+ // No need to rescale if there are no buckets.
+ return 0
+ }
+
+ low := int(startBin)
+ high := int(bin)
+ if startBin >= bin {
+ low = int(bin)
+ high = int(startBin) + length - 1
+ }
+
+ var count int32
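+ // Each halving of [low, high] corresponds to one scale decrement; stop
+ // once the bin range fits within maxSize buckets.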
+ for high-low >= p.maxSize {
+ low >>= 1
+ high >>= 1
+ count++
+ if count > expoMaxScale-expoMinScale {
+ return count
+ }
+ }
+ return count
+}
+
+// expoBuckets is a set of buckets in an exponential histogram.
+type expoBuckets struct {
+ startBin int32
+ counts []uint64
+}
+
+// record increments the count for the given bin, and expands the buckets if needed.
+// Size changes must be done before calling this function.
+func (b *expoBuckets) record(bin int32) {
+ if len(b.counts) == 0 {
+ b.counts = []uint64{1}
+ b.startBin = bin
+ return
+ }
+
+ endBin := int(b.startBin) + len(b.counts) - 1
+
+ // if the new bin is inside the current range
+ if bin >= b.startBin && int(bin) <= endBin {
+ b.counts[bin-b.startBin]++
+ return
+ }
+ // if the new bin is before the current start, add spaces to the counts
+ if bin < b.startBin {
+ origLen := len(b.counts)
+ newLength := endBin - int(bin) + 1
+ shift := b.startBin - bin
+
+ if newLength > cap(b.counts) {
+ b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
+ }
+
+ copy(b.counts[shift:origLen+int(shift)], b.counts)
+ b.counts = b.counts[:newLength]
+ for i := 1; i < int(shift); i++ {
+ b.counts[i] = 0
+ }
+ b.startBin = bin
+ b.counts[0] = 1
+ return
+ }
+ // if the new bin is after the end, add spaces to the end
+ if int(bin) > endBin {
+ if int(bin-b.startBin) < cap(b.counts) {
+ b.counts = b.counts[:bin-b.startBin+1]
+ for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ {
+ b.counts[i] = 0
+ }
+ b.counts[bin-b.startBin] = 1
+ return
+ }
+
+ end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1)
+ b.counts = append(b.counts, end...)
+ b.counts[bin-b.startBin] = 1
+ }
+}
+
+// downscale shrinks the buckets by a factor of 2^delta. It sums counts into
+// the correct lower-resolution bucket.
+func (b *expoBuckets) downscale(delta int32) {
+ // Example
+ // delta = 2
+ // Original offset: -6
+ // Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ // bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
+ // new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
+ // new Offset: -2
+ // new Counts: [4, 14, 30, 10]
+
+ if len(b.counts) <= 1 || delta < 1 {
+ b.startBin >>= delta
+ return
+ }
+
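+ // After downscaling by delta, each new bin aggregates 2^delta old bins.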
+ steps := int32(1) << delta
+ offset := b.startBin % steps
+ offset = (offset + steps) % steps // to make offset positive
+ for i := 1; i < len(b.counts); i++ {
+ idx := i + int(offset)
+ if idx%int(steps) == 0 {
+ b.counts[idx/int(steps)] = b.counts[i]
+ continue
+ }
+ b.counts[idx/int(steps)] += b.counts[i]
+ }
+
+ lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps)
+ b.counts = b.counts[:lastIdx+1]
+ b.startBin >>= delta
+}
+
+// newExponentialHistogram returns an Aggregator that summarizes a set of
+// measurements as an exponential histogram. Each histogram is scoped by attributes
+// and the aggregation cycle the measurements were made in.
+func newExponentialHistogram[N int64 | float64](
+ maxSize, maxScale int32,
+ noMinMax, noSum bool,
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *expoHistogram[N] {
+ return &expoHistogram[N]{
+ noSum: noSum,
+ noMinMax: noMinMax,
+ maxSize: int(maxSize),
+ maxScale: maxScale,
+
+ newRes: r,
+ limit: newLimiter[*expoHistogramDataPoint[N]](limit),
+ values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]),
+
+ start: now(),
+ }
+}
+
+// expoHistogram summarizes a set of measurements as a histogram with
+// exponentially defined buckets.
+type expoHistogram[N int64 | float64] struct {
+ noSum bool
+ noMinMax bool
+ maxSize int
+ maxScale int32
+
+ newRes func(attribute.Set) FilteredExemplarReservoir[N]
+ limit limiter[*expoHistogramDataPoint[N]]
+ values map[attribute.Distinct]*expoHistogramDataPoint[N]
+ valuesMu sync.Mutex
+
+ start time.Time
+}
+
+func (e *expoHistogram[N]) measure(
+ ctx context.Context,
+ value N,
+ fltrAttr attribute.Set,
+ droppedAttr []attribute.KeyValue,
+) {
+ // Ignore NaN and infinity.
+ if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
+ return
+ }
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ attr := e.limit.Attributes(fltrAttr, e.values)
+ v, ok := e.values[attr.Equivalent()]
+ if !ok {
+ v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
+ v.res = e.newRes(attr)
+
+ e.values[attr.Equivalent()] = v
+ }
+ v.record(value)
+ v.res.Offer(ctx, value, droppedAttr)
+}
+
+func (e *expoHistogram[N]) delta(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+
+ // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+ // In that case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+ h.Temporality = metricdata.DeltaTemporality
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ n := len(e.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range e.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = e.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Scale = val.scale
+ hDPts[i].ZeroCount = val.zeroCount
+ hDPts[i].ZeroThreshold = 0.0
+
+ hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
+ hDPts[i].PositiveBucket.Counts = reset(
+ hDPts[i].PositiveBucket.Counts,
+ len(val.posBuckets.counts),
+ len(val.posBuckets.counts),
+ )
+ copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
+
+ hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
+ hDPts[i].NegativeBucket.Counts = reset(
+ hDPts[i].NegativeBucket.Counts,
+ len(val.negBuckets.counts),
+ len(val.negBuckets.counts),
+ )
+ copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
+
+ if !e.noSum {
+ hDPts[i].Sum = val.sum
+ }
+ if !e.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(e.values)
+
+ e.start = t
+ h.DataPoints = hDPts
+ *dest = h
+ return n
+}
+
+func (e *expoHistogram[N]) cumulative(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+
+ // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
+ // In that case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.ExponentialHistogram[N])
+ h.Temporality = metricdata.CumulativeTemporality
+
+ e.valuesMu.Lock()
+ defer e.valuesMu.Unlock()
+
+ n := len(e.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range e.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = e.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Scale = val.scale
+ hDPts[i].ZeroCount = val.zeroCount
+ hDPts[i].ZeroThreshold = 0.0
+
+ hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin
+ hDPts[i].PositiveBucket.Counts = reset(
+ hDPts[i].PositiveBucket.Counts,
+ len(val.posBuckets.counts),
+ len(val.posBuckets.counts),
+ )
+ copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts)
+
+ hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin
+ hDPts[i].NegativeBucket.Counts = reset(
+ hDPts[i].NegativeBucket.Counts,
+ len(val.negBuckets.counts),
+ len(val.negBuckets.counts),
+ )
+ copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts)
+
+ if !e.noSum {
+ hDPts[i].Sum = val.sum
+ }
+ if !e.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ }
+
+ h.DataPoints = hDPts
+ *dest = h
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
new file mode 100644
index 000000000..d4c41642d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/exemplar"
+)
+
+// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter.
+type FilteredExemplarReservoir[N int64 | float64] interface {
+ // Offer accepts the parameters associated with a measurement. The
+ // parameters will be stored as an exemplar if the filter decides to
+ // sample the measurement.
+ //
+ // The passed ctx needs to contain any baggage or span that were active
+ // when the measurement was made. This information may be used by the
+ // Reservoir in making a sampling decision.
+ Offer(ctx context.Context, val N, attr []attribute.KeyValue)
+ // Collect returns all the held exemplars in the reservoir.
+ Collect(dest *[]exemplar.Exemplar)
+}
+
+// filteredExemplarReservoir handles the pre-sampled exemplar of measurements made.
+type filteredExemplarReservoir[N int64 | float64] struct {
+ filter exemplar.Filter
+ reservoir exemplar.Reservoir
+}
+
+// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
+// that are allowed by the filter.
+func NewFilteredExemplarReservoir[N int64 | float64](
+ f exemplar.Filter,
+ r exemplar.Reservoir,
+) FilteredExemplarReservoir[N] {
+ return &filteredExemplarReservoir[N]{
+ filter: f,
+ reservoir: r,
+ }
+}
+
+func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
+ if f.filter(ctx) {
+ // only record the current time if we are sampling this measurement.
+ f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr)
+ }
+}
+
+func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
new file mode 100644
index 000000000..736287e73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -0,0 +1,251 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "slices"
+ "sort"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type buckets[N int64 | float64] struct {
+ attrs attribute.Set
+ res FilteredExemplarReservoir[N]
+
+ counts []uint64
+ count uint64
+ total N
+ min, max N
+}
+
+// newBuckets returns buckets with n bins.
+func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] {
+ return &buckets[N]{attrs: attrs, counts: make([]uint64, n)}
+}
+
+func (b *buckets[N]) sum(value N) { b.total += value }
+
+func (b *buckets[N]) bin(idx int, value N) {
+ b.counts[idx]++
+ b.count++
+ if value < b.min {
+ b.min = value
+ } else if value > b.max {
+ b.max = value
+ }
+}
+
+// histValues summarizes a set of measurements as histogram values with
+// explicitly defined buckets.
+type histValues[N int64 | float64] struct {
+ noSum bool
+ bounds []float64
+
+ newRes func(attribute.Set) FilteredExemplarReservoir[N]
+ limit limiter[*buckets[N]]
+ values map[attribute.Distinct]*buckets[N]
+ valuesMu sync.Mutex
+}
+
+func newHistValues[N int64 | float64](
+ bounds []float64,
+ noSum bool,
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *histValues[N] {
+ // Keeping all buckets correctly associated with the passed boundaries is
+ // ultimately this type's responsibility. Make a copy here so we can always
+ // guarantee this or, in the case of failure, have complete control over
+ // the fix.
+ b := slices.Clone(bounds)
+ slices.Sort(b)
+ return &histValues[N]{
+ noSum: noSum,
+ bounds: b,
+ newRes: r,
+ limit: newLimiter[*buckets[N]](limit),
+ values: make(map[attribute.Distinct]*buckets[N]),
+ }
+}
+
+// measure records the measurement value, scoped by attr, and aggregates it
+// into a histogram.
+func (s *histValues[N]) measure(
+ ctx context.Context,
+ value N,
+ fltrAttr attribute.Set,
+ droppedAttr []attribute.KeyValue,
+) {
+ // This search will return an index in the range [0, len(s.bounds)], where
+ // it will return len(s.bounds) if value is greater than the last element
+ // of s.bounds. This aligns with the buckets in that the length of buckets
+ // is len(s.bounds)+1, with the last bucket representing:
+ // (s.bounds[len(s.bounds)-1], +∞).
+ idx := sort.SearchFloat64s(s.bounds, float64(value))
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ b, ok := s.values[attr.Equivalent()]
+ if !ok {
+ // N+1 buckets. For example:
+ //
+ // bounds = [0, 5, 10]
+ //
+ // Then,
+ //
+ // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
+ b = newBuckets[N](attr, len(s.bounds)+1)
+ b.res = s.newRes(attr)
+
+ // Ensure min and max are recorded values (not zero), for new buckets.
+ b.min, b.max = value, value
+ s.values[attr.Equivalent()] = b
+ }
+ b.bin(idx, value)
+ if !s.noSum {
+ b.sum(value)
+ }
+ b.res.Offer(ctx, value, droppedAttr)
+}
+
+// newHistogram returns an Aggregator that summarizes a set of measurements as
+// a histogram.
+func newHistogram[N int64 | float64](
+ boundaries []float64,
+ noMinMax, noSum bool,
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *histogram[N] {
+ return &histogram[N]{
+ histValues: newHistValues[N](boundaries, noSum, limit, r),
+ noMinMax: noMinMax,
+ start: now(),
+ }
+}
+
+// histogram summarizes a set of measurements as a histogram with explicitly
+// defined buckets.
+type histogram[N int64 | float64] struct {
+ *histValues[N]
+
+ noMinMax bool
+ start time.Time
+}
+
+func (s *histogram[N]) delta(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+
+ // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+ // case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.Histogram[N])
+ h.Temporality = metricdata.DeltaTemporality
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ // Do not allow modification of our copy of bounds.
+ bounds := slices.Clone(s.bounds)
+
+ n := len(s.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = s.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Bounds = bounds
+ hDPts[i].BucketCounts = val.counts
+
+ if !s.noSum {
+ hDPts[i].Sum = val.total
+ }
+
+ if !s.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(s.values)
+ // The delta collection cycle resets.
+ s.start = t
+
+ h.DataPoints = hDPts
+ *dest = h
+
+ return n
+}
+
+func (s *histogram[N]) cumulative(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+
+ // If *dest is not a metricdata.Histogram, memory reuse is missed. In that
+ // case, use the zero-value h and hope for better alignment next cycle.
+ h, _ := (*dest).(metricdata.Histogram[N])
+ h.Temporality = metricdata.CumulativeTemporality
+
+ s.valuesMu.Lock()
+ defer s.valuesMu.Unlock()
+
+ // Do not allow modification of our copy of bounds.
+ bounds := slices.Clone(s.bounds)
+
+ n := len(s.values)
+ hDPts := reset(h.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ hDPts[i].Attributes = val.attrs
+ hDPts[i].StartTime = s.start
+ hDPts[i].Time = t
+ hDPts[i].Count = val.count
+ hDPts[i].Bounds = bounds
+
+ // The HistogramDataPoint field values returned need to be copies of
+ // the buckets value as we will keep updating them.
+ //
+ // TODO (#3047): Making copies for bounds and counts incurs a large
+ // memory allocation footprint. Alternatives should be explored.
+ hDPts[i].BucketCounts = slices.Clone(val.counts)
+
+ if !s.noSum {
+ hDPts[i].Sum = val.total
+ }
+
+ if !s.noMinMax {
+ hDPts[i].Min = metricdata.NewExtrema(val.min)
+ hDPts[i].Max = metricdata.NewExtrema(val.max)
+ }
+
+ collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+
+ i++
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ }
+
+ h.DataPoints = hDPts
+ *dest = h
+
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
new file mode 100644
index 000000000..4bbe624c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -0,0 +1,172 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// datapoint is the recorded measurement data for a single attribute set.
+type datapoint[N int64 | float64] struct {
+ attrs attribute.Set
+ value N
+ res FilteredExemplarReservoir[N]
+}
+
+func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] {
+ return &lastValue[N]{
+ newRes: r,
+ limit: newLimiter[datapoint[N]](limit),
+ values: make(map[attribute.Distinct]datapoint[N]),
+ start: now(),
+ }
+}
+
+// lastValue summarizes a set of measurements as the last one made.
+type lastValue[N int64 | float64] struct {
+ sync.Mutex
+
+ newRes func(attribute.Set) FilteredExemplarReservoir[N]
+ limit limiter[datapoint[N]]
+ values map[attribute.Distinct]datapoint[N]
+ start time.Time
+}
+
+func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ d, ok := s.values[attr.Equivalent()]
+ if !ok {
+ d.res = s.newRes(attr)
+ }
+
+ d.attrs = attr
+ d.value = value
+ d.res.Offer(ctx, value, droppedAttr)
+
+ s.values[attr.Equivalent()] = d
+}
+
+func (s *lastValue[N]) delta(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints, t)
+ // Do not report stale values.
+ clear(s.values)
+ // Update start time for delta temporality.
+ s.start = t
+
+ *dest = gData
+
+ return n
+}
+
+func (s *lastValue[N]) cumulative(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints, t)
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ *dest = gData
+
+ return n
+}
+
+// copyDpts copies the datapoints held by s into dest. The number of datapoints
+// copied is returned.
+func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int {
+ n := len(s.values)
+ *dest = reset(*dest, n, n)
+
+ var i int
+ for _, v := range s.values {
+ (*dest)[i].Attributes = v.attrs
+ (*dest)[i].StartTime = s.start
+ (*dest)[i].Time = t
+ (*dest)[i].Value = v.value
+ collectExemplars(&(*dest)[i].Exemplars, v.res.Collect)
+ i++
+ }
+ return n
+}
+
+// newPrecomputedLastValue returns an aggregator that summarizes a set of
+// observations as the last one made.
+func newPrecomputedLastValue[N int64 | float64](
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *precomputedLastValue[N] {
+ return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
+}
+
+// precomputedLastValue summarizes a set of observations as the last one made.
+type precomputedLastValue[N int64 | float64] struct {
+ *lastValue[N]
+}
+
+func (s *precomputedLastValue[N]) delta(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints, t)
+ // Do not report stale values.
+ clear(s.values)
+ // Update start time for delta temporality.
+ s.start = t
+
+ *dest = gData
+
+ return n
+}
+
+func (s *precomputedLastValue[N]) cumulative(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+ // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
+ // the DataPoints is missed (better luck next time).
+ gData, _ := (*dest).(metricdata.Gauge[N])
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := s.copyDpts(&gData.DataPoints, t)
+ // Do not report stale values.
+ clear(s.values)
+ *dest = gData
+
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
new file mode 100644
index 000000000..9ea0251ed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// overflowSet is the attribute set used to record a measurement when adding
+// another distinct attribute set to the aggregate would exceed the aggregate
+// limit.
+var overflowSet = attribute.NewSet(attribute.Bool("otel.metric.overflow", true))
+
+// limiter limits aggregate values.
+type limiter[V any] struct {
+ // aggLimit is the maximum number of metric streams that can be aggregated.
+ //
+ // Any metric stream with attributes distinct from any set already
+ // aggregated will, once the aggLimit is met, instead be aggregated
+ // into an "overflow" metric stream. That stream will only contain the
+ // "otel.metric.overflow"=true attribute.
+ aggLimit int
+}
+
+// newLimiter returns a new limiter with the provided aggregation limit.
+func newLimiter[V any](aggregation int) limiter[V] {
+ return limiter[V]{aggLimit: aggregation}
+}
+
+// Attributes checks if adding a measurement for attrs will exceed the
+// aggregation cardinality limit for the existing measurements. If it will,
+// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
+// limit is not set (limit <= 0), attrs is returned.
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set {
+ if l.aggLimit > 0 {
+ _, exists := measurements[attrs.Equivalent()]
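+ // The last slot is reserved for the overflow stream, so new attribute
+ // sets are diverted once aggLimit-1 distinct sets exist.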
+ if !exists && len(measurements) >= l.aggLimit-1 {
+ return overflowSet
+ }
+ }
+
+ return attrs
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
new file mode 100644
index 000000000..1b4b2304c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -0,0 +1,249 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type sumValue[N int64 | float64] struct {
+ n N
+ res FilteredExemplarReservoir[N]
+ attrs attribute.Set
+}
+
+// valueMap is the storage for sums.
+type valueMap[N int64 | float64] struct {
+ sync.Mutex
+ newRes func(attribute.Set) FilteredExemplarReservoir[N]
+ limit limiter[sumValue[N]]
+ values map[attribute.Distinct]sumValue[N]
+}
+
+func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] {
+ return &valueMap[N]{
+ newRes: r,
+ limit: newLimiter[sumValue[N]](limit),
+ values: make(map[attribute.Distinct]sumValue[N]),
+ }
+}
+
+func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ s.Lock()
+ defer s.Unlock()
+
+ attr := s.limit.Attributes(fltrAttr, s.values)
+ v, ok := s.values[attr.Equivalent()]
+ if !ok {
+ v.res = s.newRes(attr)
+ }
+
+ v.attrs = attr
+ v.n += value
+ v.res.Offer(ctx, value, droppedAttr)
+
+ s.values[attr.Equivalent()] = v
+}
+
+// newSum returns an aggregator that summarizes a set of measurements as their
+// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
+// the measurements were made in.
+func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] {
+ return &sum[N]{
+ valueMap: newValueMap[N](limit, r),
+ monotonic: monotonic,
+ start: now(),
+ }
+}
+
+// sum summarizes a set of measurements made as their arithmetic sum.
+type sum[N int64 | float64] struct {
+ *valueMap[N]
+
+ monotonic bool
+ start time.Time
+}
+
+func (s *sum[N]) delta(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.DeltaTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ dPts[i].Attributes = val.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = val.n
+ collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+ i++
+ }
+ // Do not report stale values.
+ clear(s.values)
+ // The delta collection cycle resets.
+ s.start = t
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+func (s *sum[N]) cumulative(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.CumulativeTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for _, value := range s.values {
+ dPts[i].Attributes = value.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = value.n
+ collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+ // TODO (#3006): This will use an unbounded amount of memory if there
+ // are unbounded number of attribute sets being aggregated. Attribute
+ // sets that become "stale" need to be forgotten so this will not
+ // overload the system.
+ i++
+ }
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+// newPrecomputedSum returns an aggregator that summarizes a set of
+// observations as their arithmetic sum. Each sum is scoped by attributes and
+// the aggregation cycle the measurements were made in.
+func newPrecomputedSum[N int64 | float64](
+ monotonic bool,
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *precomputedSum[N] {
+ return &precomputedSum[N]{
+ valueMap: newValueMap[N](limit, r),
+ monotonic: monotonic,
+ start: now(),
+ }
+}
+
+// precomputedSum summarizes a set of observations as their arithmetic sum.
+type precomputedSum[N int64 | float64] struct {
+ *valueMap[N]
+
+ monotonic bool
+ start time.Time
+
+ reported map[attribute.Distinct]N
+}
+
+func (s *precomputedSum[N]) delta(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+ newReported := make(map[attribute.Distinct]N)
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.DeltaTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for key, value := range s.values {
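+ // The input is already a cumulative sum, so the delta is the change
+ // since the value reported in the previous collection cycle.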
+ delta := value.n - s.reported[key]
+
+ dPts[i].Attributes = value.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = delta
+ collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+
+ newReported[key] = value.n
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(s.values)
+ s.reported = newReported
+ // The delta collection cycle resets.
+ s.start = t
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
+
+func (s *precomputedSum[N]) cumulative(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+) int {
+ t := now()
+
+ // If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
+ // use the zero-value sData and hope for better alignment next cycle.
+ sData, _ := (*dest).(metricdata.Sum[N])
+ sData.Temporality = metricdata.CumulativeTemporality
+ sData.IsMonotonic = s.monotonic
+
+ s.Lock()
+ defer s.Unlock()
+
+ n := len(s.values)
+ dPts := reset(sData.DataPoints, n, n)
+
+ var i int
+ for _, val := range s.values {
+ dPts[i].Attributes = val.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = val.n
+ collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+
+ i++
+ }
+ // Unused attribute sets do not report.
+ clear(s.values)
+
+ sData.DataPoints = dPts
+ *dest = sData
+
+ return n
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
new file mode 100644
index 000000000..ea452be6c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package internal provides internal functionality for the metric package.
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+// ReuseSlice returns a view of slice with length n if its capacity is greater
+// than or equal to n. Otherwise, it returns a new []T with length n.
+func ReuseSlice[T any](slice []T, n int) []T {
+ if cap(slice) >= n {
+ return slice[:n]
+ }
+ return make([]T, n)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
new file mode 100644
index 000000000..be0714a5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
@@ -0,0 +1,100 @@
+# Experimental Features
+
+The Metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go Metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Exemplars](#exemplars)
+- [Instrument Enabled](#instrument-enabled)
+
+### Exemplars
+
+A sample of measurements made may be exported directly as a set of exemplars.
+
+This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
+The value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+Exemplar filters are also supported.
+The exemplar filter applies to all measurements made.
+It filters these measurements, only allowing certain ones to be passed to the underlying exemplar reservoir.
+
+To change the exemplar filter from the default `"trace_based"` filter, set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
+The value must be the case-sensitive string defined by the [OpenTelemetry specification].
+
+- `"always_on"`: allows all measurements
+- `"always_off"`: denies all measurements
+- `"trace_based"`: allows only sampled measurements
+
+All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
+
+[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
+
+#### Examples
+
+Enable exemplars to be exported.
+
+```console
+export OTEL_GO_X_EXEMPLAR=true
+```
+
+Disable exemplars from being exported.
+
+```console
+unset OTEL_GO_X_EXEMPLAR
+```
+
+Set the exemplar filter to allow all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_on
+```
+
+Set the exemplar filter to deny all measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=always_off
+```
+
+Set the exemplar filter to only allow sampled measurements.
+
+```console
+export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
+```
+
+Revert to the default exemplar filter (`"trace_based"`).
+
+```console
+unset OTEL_METRICS_EXEMPLAR_FILTER
+```
+
+### Instrument Enabled
+
+To help users avoid performing computationally expensive operations when recording measurements, synchronous instruments provide an `Enabled` method.
+
+#### Examples
+
+The following code shows an example of how to check if an instrument implements the `EnabledInstrument` interface before using the `Enabled` function to avoid doing an expensive computation:
+
+```go
+type enabledInstrument interface{ Enabled(context.Context) bool }
+
+ctx := context.Background()
+ctr, err := m.Int64Counter("expensive-counter")
+if err != nil {
+    panic(err)
+}
+if c, ok := ctr.(enabledInstrument); !ok || c.Enabled(ctx) {
+    ctr.Add(ctx, expensiveComputation())
+}
+```
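+
+In this sketch, `m` is assumed to be a `metric.Meter` and `expensiveComputation` is a placeholder for any costly work (here returning an `int64`) that should be skipped when the instrument will not process the measurement.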
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
new file mode 100644
index 000000000..294dcf846
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel metric SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
+
+import (
+ "context"
+ "os"
+)
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+//nolint:unused
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
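+
+// As an illustrative sketch only (no such flag is defined in this package),
+// a boolean feature could be declared as:
+//
+//	var myFlag = newFeature("MY_FLAG", func(v string) (bool, bool) {
+//		ok := strings.EqualFold(v, "true") // would require importing strings
+//		return ok, ok
+//	})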
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled reports whether the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
+
+// EnabledInstrument reports whether an instrument is enabled.
+//
+// The EnabledInstrument interface is implemented by synchronous instruments.
+type EnabledInstrument interface {
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
new file mode 100644
index 000000000..85d3dc207
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
@@ -0,0 +1,204 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ManualReader is a simple Reader that allows an application to
+// read metrics on demand.
+type ManualReader struct {
+ sdkProducer atomic.Value
+ shutdownOnce sync.Once
+
+ mu sync.Mutex
+ isShutdown bool
+ externalProducers atomic.Value
+
+ temporalitySelector TemporalitySelector
+ aggregationSelector AggregationSelector
+}
+
+// Compile-time check that ManualReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&ManualReader{}: {}}
+
+// NewManualReader returns a Reader which is directly called to collect metrics.
+func NewManualReader(opts ...ManualReaderOption) *ManualReader {
+ cfg := newManualReaderConfig(opts)
+ r := &ManualReader{
+ temporalitySelector: cfg.temporalitySelector,
+ aggregationSelector: cfg.aggregationSelector,
+ }
+ r.externalProducers.Store(cfg.producers)
+ return r
+}
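+
+// From a user's perspective, a ManualReader is typically registered with a
+// MeterProvider and then collected from on demand, for example:
+//
+//	reader := metric.NewManualReader()
+//	mp := metric.NewMeterProvider(metric.WithReader(reader))
+//	defer func() { _ = mp.Shutdown(context.Background()) }()
+//
+//	var rm metricdata.ResourceMetrics
+//	_ = reader.Collect(context.Background(), &rm)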
+
+// register stores the sdkProducer which enables the caller
+// to read metrics from the SDK on demand.
+func (mr *ManualReader) register(p sdkProducer) {
+ // Only register once. If producer is already set, do nothing.
+ if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+ msg := "did not register manual reader"
+ global.Error(errDuplicateRegister, msg)
+ }
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
+ return mr.temporalitySelector(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (mr *ManualReader) aggregation(
+ kind InstrumentKind,
+) Aggregation { // nolint:revive // import-shadow for method scoped by type.
+ return mr.aggregationSelector(kind)
+}
+
+// Shutdown closes any connections and frees any resources used by the reader.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Shutdown(context.Context) error {
+ err := ErrReaderShutdown
+ mr.shutdownOnce.Do(func() {
+ // Any future call to Collect will now return ErrReaderShutdown.
+ mr.sdkProducer.Store(produceHolder{
+ produce: shutdownProducer{}.produce,
+ })
+ mr.mu.Lock()
+ defer mr.mu.Unlock()
+ mr.isShutdown = true
+ // release references to Producer(s)
+ mr.externalProducers.Store([]Producer{})
+ err = nil
+ })
+ return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if rm == nil {
+ return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
+ }
+ p := mr.sdkProducer.Load()
+ if p == nil {
+ return ErrReaderNotRegistered
+ }
+
+ ph, ok := p.(produceHolder)
+ if !ok {
+ // The atomic.Value is entirely in the ManualReader's control, so
+ // this should never happen. In the unforeseen case that it does,
+ // return an error instead of panicking so a user's code does
+ // not halt in the process.
+ err := fmt.Errorf("manual reader: invalid producer: %T", p)
+ return err
+ }
+
+ err := ph.produce(ctx, rm)
+ if err != nil {
+ return err
+ }
+ for _, producer := range mr.externalProducers.Load().([]Producer) {
+ externalMetrics, e := producer.Produce(ctx)
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+ }
+
+ global.Debug("ManualReader collection", "Data", rm)
+
+ return err
+}
+
+// MarshalLog returns logging data about the ManualReader.
+func (r *ManualReader) MarshalLog() any {
+ r.mu.Lock()
+ down := r.isShutdown
+ r.mu.Unlock()
+ return struct {
+ Type string
+ Registered bool
+ Shutdown bool
+ }{
+ Type: "ManualReader",
+ Registered: r.sdkProducer.Load() != nil,
+ Shutdown: down,
+ }
+}
+
+// manualReaderConfig contains configuration options for a ManualReader.
+type manualReaderConfig struct {
+ temporalitySelector TemporalitySelector
+ aggregationSelector AggregationSelector
+ producers []Producer
+}
+
+// newManualReaderConfig returns a manualReaderConfig configured with options.
+func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
+ cfg := manualReaderConfig{
+ temporalitySelector: DefaultTemporalitySelector,
+ aggregationSelector: DefaultAggregationSelector,
+ }
+ for _, opt := range opts {
+ cfg = opt.applyManual(cfg)
+ }
+ return cfg
+}
+
+// ManualReaderOption applies a configuration option value to a ManualReader.
+type ManualReaderOption interface {
+ applyManual(manualReaderConfig) manualReaderConfig
+}
+
+// WithTemporalitySelector sets the TemporalitySelector a reader will use to
+// determine the Temporality of an instrument based on its kind. If this
+// option is not used, the reader will use the DefaultTemporalitySelector.
+func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption {
+ return temporalitySelectorOption{selector: selector}
+}
+
+type temporalitySelectorOption struct {
+ selector func(instrument InstrumentKind) metricdata.Temporality
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
+ mrc.temporalitySelector = t.selector
+ return mrc
+}
+
+// WithAggregationSelector sets the AggregationSelector a reader will use to
+// determine the aggregation to use for an instrument based on its kind. If
+// this option is not used, the reader will use the DefaultAggregationSelector
+// or the aggregation explicitly passed for a view matching an instrument.
+func WithAggregationSelector(selector AggregationSelector) ManualReaderOption {
+ return aggregationSelectorOption{selector: selector}
+}
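+
+// For example, a selector that widens histogram buckets but defers to the
+// default for every other kind (a sketch; the boundaries are illustrative):
+//
+//	sel := func(k InstrumentKind) Aggregation {
+//		if k == InstrumentKindHistogram {
+//			return AggregationExplicitBucketHistogram{
+//				Boundaries: []float64{0, 10, 100, 1000},
+//			}
+//		}
+//		return DefaultAggregationSelector(k)
+//	}
+//	reader := NewManualReader(WithAggregationSelector(sel))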
+
+type aggregationSelectorOption struct {
+ selector AggregationSelector
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
+ c.aggregationSelector = t.selector
+ return c
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
new file mode 100644
index 000000000..e0a1e90e7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
@@ -0,0 +1,773 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+)
+
+// ErrInstrumentName indicates the created instrument has an invalid name.
+// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter.
+var ErrInstrumentName = errors.New("invalid instrument name")
+
+// meter handles the creation and coordination of all metric instruments. A
+// meter represents a single instrumentation scope; all metric telemetry
+// produced by an instrumentation scope will use metric instruments from a
+// single meter.
+type meter struct {
+ embedded.Meter
+
+ scope instrumentation.Scope
+ pipes pipelines
+
+ int64Insts *cacheWithErr[instID, *int64Inst]
+ float64Insts *cacheWithErr[instID, *float64Inst]
+ int64ObservableInsts *cacheWithErr[instID, int64Observable]
+ float64ObservableInsts *cacheWithErr[instID, float64Observable]
+
+ int64Resolver resolver[int64]
+ float64Resolver resolver[float64]
+}
+
+func newMeter(s instrumentation.Scope, p pipelines) *meter {
+	// viewCache ensures that instrument conflicts this meter is asked to
+	// create, including number conflicts, are logged to the user.
+ var viewCache cache[string, instID]
+
+ var int64Insts cacheWithErr[instID, *int64Inst]
+ var float64Insts cacheWithErr[instID, *float64Inst]
+ var int64ObservableInsts cacheWithErr[instID, int64Observable]
+ var float64ObservableInsts cacheWithErr[instID, float64Observable]
+
+ return &meter{
+ scope: s,
+ pipes: p,
+ int64Insts: &int64Insts,
+ float64Insts: &float64Insts,
+ int64ObservableInsts: &int64ObservableInsts,
+ float64ObservableInsts: &float64ObservableInsts,
+ int64Resolver: newResolver[int64](p, &viewCache),
+ float64Resolver: newResolver[float64](p, &viewCache),
+ }
+}
+
+// Compile-time check meter implements metric.Meter.
+var _ metric.Meter = (*meter)(nil)
+
+// Int64Counter returns a new instrument identified by name and configured with
+// options. The instrument is used to synchronously record increasing int64
+// measurements during a computational operation.
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ cfg := metric.NewInt64CounterConfig(options...)
+ const kind = InstrumentKindCounter
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// int64 measurements during a computational operation.
+func (m *meter) Int64UpDownCounter(
+ name string,
+ options ...metric.Int64UpDownCounterOption,
+) (metric.Int64UpDownCounter, error) {
+ cfg := metric.NewInt64UpDownCounterConfig(options...)
+ const kind = InstrumentKindUpDownCounter
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of int64 measurements during a computational operation.
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ cfg := metric.NewInt64HistogramConfig(options...)
+ p := int64InstProvider{m}
+ i, err := p.lookupHistogram(name, cfg)
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Int64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous int64 measurements during a computational operation.
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ cfg := metric.NewInt64GaugeConfig(options...)
+ const kind = InstrumentKindGauge
+ p := int64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// int64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+ if m.int64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.int64ObservableInsts.Lookup(key, func() (int64Observable, error) {
+ inst := newInt64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.int64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+
+			// Add the measures to the pipeline. Measures must be tracked per
+			// pipeline so that a callback never invokes a measure that is not
+			// part of its pipeline.
+ insert.pipeline.addInt64Measure(inst.observableID, in)
+ for _, cback := range callbacks {
+ inst := int64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
+// Int64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableCounter(
+ name string,
+ options ...metric.Int64ObservableCounterOption,
+) (metric.Int64ObservableCounter, error) {
+ cfg := metric.NewInt64ObservableCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableUpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// int64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableUpDownCounter(
+ name string,
+ options ...metric.Int64ObservableUpDownCounterOption,
+) (metric.Int64ObservableUpDownCounter, error) {
+ cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Int64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous int64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Int64ObservableGauge is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Int64ObservableGauge(
+ name string,
+ options ...metric.Int64ObservableGaugeOption,
+) (metric.Int64ObservableGauge, error) {
+ cfg := metric.NewInt64ObservableGaugeConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
+ }
+ return m.int64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64Counter returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record increasing
+// float64 measurements during a computational operation.
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ cfg := metric.NewFloat64CounterConfig(options...)
+ const kind = InstrumentKindCounter
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64UpDownCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to synchronously record
+// float64 measurements during a computational operation.
+func (m *meter) Float64UpDownCounter(
+ name string,
+ options ...metric.Float64UpDownCounterOption,
+) (metric.Float64UpDownCounter, error) {
+ cfg := metric.NewFloat64UpDownCounterConfig(options...)
+ const kind = InstrumentKindUpDownCounter
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64Histogram returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record the
+// distribution of float64 measurements during a computational operation.
+func (m *meter) Float64Histogram(
+ name string,
+ options ...metric.Float64HistogramOption,
+) (metric.Float64Histogram, error) {
+ cfg := metric.NewFloat64HistogramConfig(options...)
+ p := float64InstProvider{m}
+ i, err := p.lookupHistogram(name, cfg)
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// Float64Gauge returns a new instrument identified by name and configured
+// with options. The instrument is used to synchronously record
+// instantaneous float64 measurements during a computational operation.
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ cfg := metric.NewFloat64GaugeConfig(options...)
+ const kind = InstrumentKindGauge
+ p := float64InstProvider{m}
+ i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+ if err != nil {
+ return i, err
+ }
+
+ return i, validateInstrumentName(name)
+}
+
+// float64ObservableInstrument returns a new observable identified by the Instrument.
+// It registers callbacks for each reader's pipeline.
+func (m *meter) float64ObservableInstrument(
+ id Instrument,
+ callbacks []metric.Float64Callback,
+) (float64Observable, error) {
+ key := instID{
+ Name: id.Name,
+ Description: id.Description,
+ Unit: id.Unit,
+ Kind: id.Kind,
+ }
+	if m.float64ObservableInsts.HasKey(key) && len(callbacks) > 0 {
+ warnRepeatedObservableCallbacks(id)
+ }
+ return m.float64ObservableInsts.Lookup(key, func() (float64Observable, error) {
+ inst := newFloat64Observable(m, id.Kind, id.Name, id.Description, id.Unit)
+ for _, insert := range m.float64Resolver.inserters {
+ // Connect the measure functions for instruments in this pipeline with the
+ // callbacks for this pipeline.
+ in, err := insert.Instrument(id, insert.readerDefaultAggregation(id.Kind))
+ if err != nil {
+ return inst, err
+ }
+ // Drop aggregation
+ if len(in) == 0 {
+ inst.dropAggregation = true
+ continue
+ }
+ inst.appendMeasures(in)
+
+			// Add the measures to the pipeline. Measures must be tracked per
+			// pipeline so that a callback never invokes a measure that is not
+			// part of its pipeline.
+ insert.pipeline.addFloat64Measure(inst.observableID, in)
+ for _, cback := range callbacks {
+ inst := float64Observer{measures: in}
+ fn := cback
+ insert.addCallback(func(ctx context.Context) error { return fn(ctx, inst) })
+ }
+ }
+ return inst, validateInstrumentName(id.Name)
+ })
+}
+
+// Float64ObservableCounter returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// increasing float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableCounter(
+ name string,
+ options ...metric.Float64ObservableCounterOption,
+) (metric.Float64ObservableCounter, error) {
+ cfg := metric.NewFloat64ObservableCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableCounter,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableUpDownCounter returns a new instrument identified by name
+// and configured with options. The instrument is used to asynchronously record
+// float64 measurements once per measurement collection cycle. Only the
+// measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableUpDownCounter(
+ name string,
+ options ...metric.Float64ObservableUpDownCounterOption,
+) (metric.Float64ObservableUpDownCounter, error) {
+ cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableUpDownCounter,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+// Float64ObservableGauge returns a new instrument identified by name and
+// configured with options. The instrument is used to asynchronously record
+// instantaneous float64 measurements once per measurement collection cycle.
+// Only the measurements recorded during the collection cycle are exported.
+//
+// If Float64ObservableGauge is invoked repeatedly with the same Name,
+// Description, and Unit, only the first set of callbacks provided are used.
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks
+// if instrumentation can be created multiple times with different callbacks.
+func (m *meter) Float64ObservableGauge(
+ name string,
+ options ...metric.Float64ObservableGaugeOption,
+) (metric.Float64ObservableGauge, error) {
+ cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+ id := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindObservableGauge,
+ Scope: m.scope,
+ }
+ return m.float64ObservableInstrument(id, cfg.Callbacks())
+}
+
+func validateInstrumentName(name string) error {
+ if name == "" {
+ return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
+ }
+ if len(name) > 255 {
+ return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name)
+ }
+ if !isAlpha([]rune(name)[0]) {
+ return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name)
+ }
+ if len(name) == 1 {
+ return nil
+ }
+ for _, c := range name[1:] {
+ if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
+ return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
+ }
+ }
+ return nil
+}
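+
+// For instance, a sketch of how the rules above apply:
+//
+//	validateInstrumentName("http.server.duration") // nil
+//	validateInstrumentName("")                     // ErrInstrumentName: is empty
+//	validateInstrumentName("1requests")            // ErrInstrumentName: must start with a letter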
+
+func isAlpha(c rune) bool {
+ return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
+}
+
+func isAlphanumeric(c rune) bool {
+ return isAlpha(c) || ('0' <= c && c <= '9')
+}
+
+func warnRepeatedObservableCallbacks(id Instrument) {
+ inst := fmt.Sprintf(
+ "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+ id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+ )
+ global.Warn(
+ "Repeated observable instrument creation with callbacks. Ignoring new callbacks. Use meter.RegisterCallback and Registration.Unregister to manage callbacks.",
+ "instrument",
+ inst,
+ )
+}
+
+// RegisterCallback registers f to be called each collection cycle so it will
+// make observations for insts during those cycles.
+//
+// The only instruments f can make observations for are insts. All other
+// observations will be dropped and an error will be logged.
+//
+// Only instruments from this meter can be registered with f; an error is
+// returned if other instruments are provided.
+//
+// Only observations made in the callback will be exported. Unlike synchronous
+// instruments, asynchronous callbacks can "forget" attribute sets that are no
+// longer relevant by omitting the observation during the callback.
+//
+// The returned Registration can be used to unregister f.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+ if len(insts) == 0 {
+		// Don't allocate an observer if not needed.
+ return noopRegister{}, nil
+ }
+
+ var err error
+ validInstruments := make([]metric.Observable, 0, len(insts))
+ for _, inst := range insts {
+ switch o := inst.(type) {
+ case int64Observable:
+ if e := o.registerable(m); e != nil {
+ if !errors.Is(e, errEmptyAgg) {
+ err = errors.Join(err, e)
+ }
+ continue
+ }
+
+ validInstruments = append(validInstruments, inst)
+ case float64Observable:
+ if e := o.registerable(m); e != nil {
+ if !errors.Is(e, errEmptyAgg) {
+ err = errors.Join(err, e)
+ }
+ continue
+ }
+
+ validInstruments = append(validInstruments, inst)
+ default:
+ // Instrument external to the SDK.
+ return nil, errors.New("invalid observable: from different implementation")
+ }
+ }
+
+ if len(validInstruments) == 0 {
+ // All insts use drop aggregation or are invalid.
+ return noopRegister{}, err
+ }
+
+ unregs := make([]func(), len(m.pipes))
+ for ix, pipe := range m.pipes {
+ reg := newObserver(pipe)
+ for _, inst := range validInstruments {
+ switch o := inst.(type) {
+ case int64Observable:
+ reg.registerInt64(o.observableID)
+ case float64Observable:
+ reg.registerFloat64(o.observableID)
+ }
+ }
+
+ // Some or all instruments were valid.
+ cBack := func(ctx context.Context) error { return f(ctx, reg) }
+ unregs[ix] = pipe.addMultiCallback(cBack)
+ }
+
+ return unregisterFuncs{f: unregs}, err
+}
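+
+// A sketch of registering a multi-instrument callback from the caller's
+// side (the meter, instrument name, and use of runtime are illustrative):
+//
+//	goroutines, _ := meter.Int64ObservableGauge("process.goroutines")
+//	reg, _ := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
+//		o.ObserveInt64(goroutines, int64(runtime.NumGoroutine()))
+//		return nil
+//	}, goroutines)
+//	defer func() { _ = reg.Unregister() }()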
+
+type observer struct {
+ embedded.Observer
+
+ pipe *pipeline
+ float64 map[observableID[float64]]struct{}
+ int64 map[observableID[int64]]struct{}
+}
+
+func newObserver(p *pipeline) observer {
+ return observer{
+ pipe: p,
+ float64: make(map[observableID[float64]]struct{}),
+ int64: make(map[observableID[int64]]struct{}),
+ }
+}
+
+func (r observer) registerFloat64(id observableID[float64]) {
+ r.float64[id] = struct{}{}
+}
+
+func (r observer) registerInt64(id observableID[int64]) {
+ r.int64[id] = struct{}{}
+}
+
+var (
+ errUnknownObserver = errors.New("unknown observable instrument")
+ errUnregObserver = errors.New("observable instrument not registered for callback")
+)
+
+func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) {
+ var oImpl float64Observable
+ switch conv := o.(type) {
+ case float64Observable:
+ oImpl = conv
+ default:
+ global.Error(errUnknownObserver, "failed to record")
+ return
+ }
+
+ if _, registered := r.float64[oImpl.observableID]; !registered {
+ if !oImpl.dropAggregation {
+ global.Error(errUnregObserver, "failed to record",
+ "name", oImpl.name,
+ "description", oImpl.description,
+ "unit", oImpl.unit,
+ "number", fmt.Sprintf("%T", float64(0)),
+ )
+ }
+ return
+ }
+ c := metric.NewObserveConfig(opts)
+	// Access to r.pipe.float64Measures is already guarded by a lock in pipeline.produce.
+ // TODO (#5946): Refactor pipeline and observable measures.
+ measures := r.pipe.float64Measures[oImpl.observableID]
+ for _, m := range measures {
+ m(context.Background(), v, c.Attributes())
+ }
+}
+
+func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
+ var oImpl int64Observable
+ switch conv := o.(type) {
+ case int64Observable:
+ oImpl = conv
+ default:
+ global.Error(errUnknownObserver, "failed to record")
+ return
+ }
+
+ if _, registered := r.int64[oImpl.observableID]; !registered {
+ if !oImpl.dropAggregation {
+ global.Error(errUnregObserver, "failed to record",
+ "name", oImpl.name,
+ "description", oImpl.description,
+ "unit", oImpl.unit,
+ "number", fmt.Sprintf("%T", int64(0)),
+ )
+ }
+ return
+ }
+ c := metric.NewObserveConfig(opts)
+	// Access to r.pipe.int64Measures is already guarded by a lock in pipeline.produce.
+ // TODO (#5946): Refactor pipeline and observable measures.
+ measures := r.pipe.int64Measures[oImpl.observableID]
+ for _, m := range measures {
+ m(context.Background(), v, c.Attributes())
+ }
+}
+
+type noopRegister struct{ embedded.Registration }
+
+func (noopRegister) Unregister() error {
+ return nil
+}
+
+// int64InstProvider provides int64 OpenTelemetry instruments.
+type int64InstProvider struct{ *meter }
+
+func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) {
+ inst := Instrument{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ Scope: p.scope,
+ }
+ return p.int64Resolver.Aggregators(inst)
+}
+
+func (p int64InstProvider) histogramAggs(
+ name string,
+ cfg metric.Int64HistogramConfig,
+) ([]aggregate.Measure[int64], error) {
+ boundaries := cfg.ExplicitBucketBoundaries()
+ aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+ if aggError != nil {
+ // If boundaries are invalid, ignore them.
+ boundaries = nil
+ }
+ inst := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ Scope: p.scope,
+ }
+ measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries)
+ return measures, errors.Join(aggError, err)
+}
+
+// lookup returns the resolved instrumentImpl.
+func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
+ return p.int64Insts.Lookup(instID{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ }, func() (*int64Inst, error) {
+ aggs, err := p.aggs(kind, name, desc, u)
+ return &int64Inst{measures: aggs}, err
+ })
+}
+
+// lookupHistogram returns the resolved instrumentImpl.
+func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
+ return p.int64Insts.Lookup(instID{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ }, func() (*int64Inst, error) {
+ aggs, err := p.histogramAggs(name, cfg)
+ return &int64Inst{measures: aggs}, err
+ })
+}
+
+// float64InstProvider provides float64 OpenTelemetry instruments.
+type float64InstProvider struct{ *meter }
+
+func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) {
+ inst := Instrument{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ Scope: p.scope,
+ }
+ return p.float64Resolver.Aggregators(inst)
+}
+
+func (p float64InstProvider) histogramAggs(
+ name string,
+ cfg metric.Float64HistogramConfig,
+) ([]aggregate.Measure[float64], error) {
+ boundaries := cfg.ExplicitBucketBoundaries()
+ aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+ if aggError != nil {
+ // If boundaries are invalid, ignore them.
+ boundaries = nil
+ }
+ inst := Instrument{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ Scope: p.scope,
+ }
+ measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries)
+ return measures, errors.Join(aggError, err)
+}
+
+// lookup returns the resolved instrumentImpl.
+func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
+ return p.float64Insts.Lookup(instID{
+ Name: name,
+ Description: desc,
+ Unit: u,
+ Kind: kind,
+ }, func() (*float64Inst, error) {
+ aggs, err := p.aggs(kind, name, desc, u)
+ return &float64Inst{measures: aggs}, err
+ })
+}
+
+// lookupHistogram returns the resolved instrumentImpl.
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
+ return p.float64Insts.Lookup(instID{
+ Name: name,
+ Description: cfg.Description(),
+ Unit: cfg.Unit(),
+ Kind: InstrumentKindHistogram,
+ }, func() (*float64Inst, error) {
+ aggs, err := p.histogramAggs(name, cfg)
+ return &float64Inst{measures: aggs}, err
+ })
+}
+
+type int64Observer struct {
+ embedded.Int64Observer
+ measures[int64]
+}
+
+func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
+ c := metric.NewObserveConfig(opts)
+ o.observe(val, c.Attributes())
+}
+
+type float64Observer struct {
+ embedded.Float64Observer
+ measures[float64]
+}
+
+func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
+ c := metric.NewObserveConfig(opts)
+ o.observe(val, c.Attributes())
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
new file mode 100644
index 000000000..d1390df1b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/README.md
@@ -0,0 +1,3 @@
+# SDK Metric data
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/metricdata)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/metricdata)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
new file mode 100644
index 000000000..af835e9d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
@@ -0,0 +1,297 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package metricdata provides types for the metric SDK data model.
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
+// that created them.
+type ResourceMetrics struct {
+ // Resource represents the entity that collected the metrics.
+ Resource *resource.Resource
+ // ScopeMetrics are the collection of metrics with unique Scopes.
+ ScopeMetrics []ScopeMetrics
+}
+
+// ScopeMetrics is a collection of Metrics produced by a Meter.
+type ScopeMetrics struct {
+ // Scope is the Scope that the Meter was created with.
+ Scope instrumentation.Scope
+ // Metrics are a list of aggregations created by the Meter.
+ Metrics []Metrics
+}
+
+// Metrics is a collection of one or more aggregated timeseries from an Instrument.
+type Metrics struct {
+ // Name is the name of the Instrument that created this data.
+ Name string
+ // Description is the description of the Instrument, which can be used in documentation.
+ Description string
+ // Unit is the unit in which the Instrument reports.
+ Unit string
+ // Data is the aggregated data from an Instrument.
+ Data Aggregation
+}
+
+// Aggregation is the store of data reported by an Instrument.
+// It will be one of: Gauge, Sum, Histogram, ExponentialHistogram, or Summary.
+type Aggregation interface {
+ privateAggregation()
+}
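+
+// Consumers typically type switch over the concrete types to read an
+// Aggregation (a sketch; m is an assumed Metrics value and only the cases
+// of interest need handling):
+//
+//	switch data := m.Data.(type) {
+//	case Sum[int64]:
+//		_ = data.IsMonotonic
+//	case Histogram[float64]:
+//		_ = data.Temporality
+//	case Gauge[float64]:
+//		_ = data.DataPoints
+//	}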
+
+// Gauge represents a measurement of the current value of an instrument.
+type Gauge[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // Attributes.
+ DataPoints []DataPoint[N]
+}
+
+func (Gauge[N]) privateAggregation() {}
+
+// Sum represents the sum of all measurements of values from an instrument.
+type Sum[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // Attributes.
+ DataPoints []DataPoint[N]
+ // Temporality describes if the aggregation is reported as the change from the
+ // last report time, or the cumulative changes since a fixed start time.
+ Temporality Temporality
+ // IsMonotonic represents if this aggregation only increases or decreases.
+ IsMonotonic bool
+}
+
+func (Sum[N]) privateAggregation() {}
+
+// DataPoint is a single data point in a timeseries.
+type DataPoint[N int64 | float64] struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+ // StartTime is when the timeseries was started. (optional)
+ StartTime time.Time `json:",omitempty"`
+ // Time is the time when the timeseries was recorded. (optional)
+ Time time.Time `json:",omitempty"`
+ // Value is the value of this data point.
+ Value N
+
+ // Exemplars is the sampled Exemplars collected during the timeseries.
+ Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// Histogram represents the histogram of all measurements of values from an instrument.
+type Histogram[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // Attributes.
+ DataPoints []HistogramDataPoint[N]
+ // Temporality describes if the aggregation is reported as the change from the
+ // last report time, or the cumulative changes since a fixed start time.
+ Temporality Temporality
+}
+
+func (Histogram[N]) privateAggregation() {}
+
+// HistogramDataPoint is a single histogram data point in a timeseries.
+type HistogramDataPoint[N int64 | float64] struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+ // StartTime is when the timeseries was started.
+ StartTime time.Time
+ // Time is the time when the timeseries was recorded.
+ Time time.Time
+
+ // Count is the number of updates this histogram has been calculated with.
+ Count uint64
+	// Bounds are the upper bounds of the buckets of the histogram. The
+	// final +infinity boundary is implied and not stored, so BucketCounts
+	// has one more element than Bounds.
+ Bounds []float64
+ // BucketCounts is the count of each of the buckets.
+ BucketCounts []uint64
+
+ // Min is the minimum value recorded. (optional)
+ Min Extrema[N]
+ // Max is the maximum value recorded. (optional)
+ Max Extrema[N]
+ // Sum is the sum of the values recorded.
+ Sum N
+
+ // Exemplars is the sampled Exemplars collected during the timeseries.
+ Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialHistogram represents the histogram of all measurements of values from an instrument.
+type ExponentialHistogram[N int64 | float64] struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // attributes.
+ DataPoints []ExponentialHistogramDataPoint[N]
+ // Temporality describes if the aggregation is reported as the change from the
+ // last report time, or the cumulative changes since a fixed start time.
+ Temporality Temporality
+}
+
+func (ExponentialHistogram[N]) privateAggregation() {}
+
+// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries.
+type ExponentialHistogramDataPoint[N int64 | float64] struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+ // StartTime is when the timeseries was started.
+ StartTime time.Time
+ // Time is the time when the timeseries was recorded.
+ Time time.Time
+
+ // Count is the number of updates this histogram has been calculated with.
+ Count uint64
+ // Min is the minimum value recorded. (optional)
+ Min Extrema[N]
+ // Max is the maximum value recorded. (optional)
+ Max Extrema[N]
+ // Sum is the sum of the values recorded.
+ Sum N
+
+ // Scale describes the resolution of the histogram. Boundaries are
+ // located at powers of the base, where:
+ //
+ // base = 2 ^ (2 ^ -Scale)
+ Scale int32
+ // ZeroCount is the number of values whose absolute value
+ // is less than or equal to [ZeroThreshold].
+ // When ZeroThreshold is 0, this is the number of values that
+ // cannot be expressed using the standard exponential formula
+ // as well as values that have been rounded to zero.
+ // ZeroCount represents the special zero count bucket.
+ ZeroCount uint64
+
+ // PositiveBucket is range of positive value bucket counts.
+ PositiveBucket ExponentialBucket
+ // NegativeBucket is range of negative value bucket counts.
+ NegativeBucket ExponentialBucket
+
+ // ZeroThreshold is the width of the zero region. Where the zero region is
+ // defined as the closed interval [-ZeroThreshold, ZeroThreshold].
+ ZeroThreshold float64
+
+ // Exemplars is the sampled Exemplars collected during the timeseries.
+ Exemplars []Exemplar[N] `json:",omitempty"`
+}
+
+// ExponentialBucket is a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialBucket struct {
+ // Offset is the bucket index of the first entry in the Counts slice.
+ Offset int32
+	// Counts is a slice where Counts[i] carries the count of the bucket at
+ // index (Offset+i). Counts[i] is the count of values greater than
+ // base^(Offset+i) and less than or equal to base^(Offset+i+1).
+ Counts []uint64
+}
+
+// Extrema is the minimum or maximum value of a dataset.
+type Extrema[N int64 | float64] struct {
+ value N
+ valid bool
+}
+
+// MarshalText converts the Extrema value to text.
+func (e Extrema[N]) MarshalText() ([]byte, error) {
+ if !e.valid {
+ return json.Marshal(nil)
+ }
+ return json.Marshal(e.value)
+}
+
+// MarshalJSON converts the Extrema value to JSON number.
+func (e *Extrema[N]) MarshalJSON() ([]byte, error) {
+ return e.MarshalText()
+}
+
+// NewExtrema returns an Extrema set to v.
+func NewExtrema[N int64 | float64](v N) Extrema[N] {
+ return Extrema[N]{value: v, valid: true}
+}
+
+// Value returns the Extrema value and true if the Extrema is defined.
+// Otherwise, if the Extrema is its zero-value, defined will be false.
+func (e Extrema[N]) Value() (v N, defined bool) {
+ return e.value, e.valid
+}
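+
+// A sketch of reading an optional extremum (hdp is an assumed
+// HistogramDataPoint):
+//
+//	if minVal, defined := hdp.Min.Value(); defined {
+//		fmt.Printf("min=%v\n", minVal)
+//	}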
+
+// Exemplar is a measurement sampled from a timeseries providing a typical
+// example.
+type Exemplar[N int64 | float64] struct {
+ // FilteredAttributes are the attributes recorded with the measurement but
+ // filtered out of the timeseries' aggregated data.
+ FilteredAttributes []attribute.KeyValue
+ // Time is the time when the measurement was recorded.
+ Time time.Time
+ // Value is the measured value.
+ Value N
+ // SpanID is the ID of the span that was active during the measurement. If
+ // no span was active or the span was not sampled this will be empty.
+ SpanID []byte `json:",omitempty"`
+ // TraceID is the ID of the trace the active span belonged to during the
+ // measurement. If no span was active or the span was not sampled this will
+ // be empty.
+ TraceID []byte `json:",omitempty"`
+}
+
+// Summary metric data are used to convey quantile summaries, a Prometheus
+// data type (see: https://prometheus.io/docs/concepts/metric_types/#summary).
+//
+// These data points cannot always be merged in a meaningful way. The Summary
+// type is only used by bridges from other metrics libraries, and cannot be
+// produced using OpenTelemetry instrumentation.
+type Summary struct {
+ // DataPoints are the individual aggregated measurements with unique
+ // attributes.
+ DataPoints []SummaryDataPoint
+}
+
+func (Summary) privateAggregation() {}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+ // Attributes is the set of key value pairs that uniquely identify the
+ // timeseries.
+ Attributes attribute.Set
+
+ // StartTime is when the timeseries was started.
+ StartTime time.Time
+ // Time is the time when the timeseries was recorded.
+ Time time.Time
+
+ // Count is the number of updates this summary has been calculated with.
+ Count uint64
+
+ // Sum is the sum of the values recorded.
+ Sum float64
+
+ // (Optional) list of values at different quantiles of the distribution calculated
+ // from the current snapshot. The quantiles must be strictly increasing.
+ QuantileValues []QuantileValue
+}
+
+// QuantileValue is the value at a given quantile of a summary.
+type QuantileValue struct {
+ // Quantile is the quantile of this value.
+ //
+ // Must be in the interval [0.0, 1.0].
+ Quantile float64
+
+ // Value is the value at the given quantile of a summary.
+ //
+ // Quantile values must NOT be negative.
+ Value float64
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
new file mode 100644
index 000000000..2ac840ff3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=Temporality
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+// Temporality defines the window that an aggregation was calculated over.
+type Temporality uint8
+
+const (
+ // undefinedTemporality represents an unset Temporality.
+ //nolint:unused
+ undefinedTemporality Temporality = iota
+
+ // CumulativeTemporality defines a measurement interval that continues to
+ // expand forward in time from a starting point. New measurements are
+ // added to all previous measurements since a start time.
+ CumulativeTemporality
+
+ // DeltaTemporality defines a measurement interval that resets each cycle.
+ // Measurements from one cycle are recorded independently, measurements
+ // from other cycles do not affect them.
+ DeltaTemporality
+)
+
+// MarshalText returns the text encoding of t.
+func (t Temporality) MarshalText() ([]byte, error) {
+ return []byte(t.String()), nil
+}
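+
+// For example (a sketch):
+//
+//	b, _ := DeltaTemporality.MarshalText()
+//	// string(b) == "DeltaTemporality"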
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
new file mode 100644
index 000000000..4da833cdc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[undefinedTemporality-0]
+ _ = x[CumulativeTemporality-1]
+ _ = x[DeltaTemporality-2]
+}
+
+const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
+
+var _Temporality_index = [...]uint8{0, 20, 41, 57}
+
+func (i Temporality) String() string {
+ if i >= Temporality(len(_Temporality_index)-1) {
+ return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
new file mode 100644
index 000000000..f08c771a6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -0,0 +1,371 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Default periodic reader timing.
+const (
+ defaultTimeout = time.Millisecond * 30000
+ defaultInterval = time.Millisecond * 60000
+)
+
+// periodicReaderConfig contains configuration options for a PeriodicReader.
+type periodicReaderConfig struct {
+ interval time.Duration
+ timeout time.Duration
+ producers []Producer
+}
+
+// newPeriodicReaderConfig returns a periodicReaderConfig configured with
+// options.
+func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
+ c := periodicReaderConfig{
+ interval: envDuration(envInterval, defaultInterval),
+ timeout: envDuration(envTimeout, defaultTimeout),
+ }
+ for _, o := range options {
+ c = o.applyPeriodic(c)
+ }
+ return c
+}
+
+// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
+type PeriodicReaderOption interface {
+ applyPeriodic(periodicReaderConfig) periodicReaderConfig
+}
+
+// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
+type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
+
+// applyPeriodic returns a periodicReaderConfig with option(s) applied.
+func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig {
+ return o(conf)
+}
+
+// WithTimeout configures the time a PeriodicReader waits for an export to
+// complete before canceling it. This includes an export which occurs as part
+// of Shutdown or ForceFlush if the user-passed context does not have a
+// deadline. If the user-passed context does have a deadline, it will be used
+// instead.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_TIMEOUT environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 30 seconds
+// is used as the default.
+func WithTimeout(d time.Duration) PeriodicReaderOption {
+ return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+ if d <= 0 {
+ return conf
+ }
+ conf.timeout = d
+ return conf
+ })
+}
+
+// WithInterval configures the intervening time between exports for a
+// PeriodicReader.
+//
+// This option overrides any value set for the
+// OTEL_METRIC_EXPORT_INTERVAL environment variable.
+//
+// If this option is not used or d is less than or equal to zero, 60 seconds
+// is used as the default.
+func WithInterval(d time.Duration) PeriodicReaderOption {
+ return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
+ if d <= 0 {
+ return conf
+ }
+ conf.interval = d
+ return conf
+ })
+}
+
+// NewPeriodicReader returns a Reader that collects and exports metric data to
+// the exporter at a defined interval. By default, the returned Reader will
+// collect and export data every 60 seconds, and will cancel any attempts that
+// exceed 30 seconds, collect and export combined. The time spent collecting
+// and exporting is not counted towards the interval between attempts.
+//
+// The Collect method of the returned Reader continues to gather and return
+// metric data to the user. It will not automatically send that data to the
+// exporter. That is left to the user to accomplish.
+func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader {
+ conf := newPeriodicReaderConfig(options)
+ ctx, cancel := context.WithCancel(context.Background())
+ r := &PeriodicReader{
+ interval: conf.interval,
+ timeout: conf.timeout,
+ exporter: exporter,
+ flushCh: make(chan chan error),
+ cancel: cancel,
+ done: make(chan struct{}),
+ rmPool: sync.Pool{
+ New: func() any {
+ return &metricdata.ResourceMetrics{}
+ },
+ },
+ }
+ r.externalProducers.Store(conf.producers)
+
+ go func() {
+ defer func() { close(r.done) }()
+ r.run(ctx, conf.interval)
+ }()
+
+ return r
+}
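+
+// A minimal wiring sketch (exp is an assumed Exporter implementation, such
+// as an OTLP metric exporter; error handling elided):
+//
+//	reader := NewPeriodicReader(exp,
+//		WithInterval(15*time.Second),
+//		WithTimeout(5*time.Second),
+//	)
+//	mp := NewMeterProvider(WithReader(reader))
+//	defer func() { _ = mp.Shutdown(context.Background()) }()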
+
+// PeriodicReader is a Reader that continuously collects and exports metric
+// data at a set interval.
+type PeriodicReader struct {
+ sdkProducer atomic.Value
+
+ mu sync.Mutex
+ isShutdown bool
+ externalProducers atomic.Value
+
+ interval time.Duration
+ timeout time.Duration
+ exporter Exporter
+ flushCh chan chan error
+
+ done chan struct{}
+ cancel context.CancelFunc
+ shutdownOnce sync.Once
+
+ rmPool sync.Pool
+}
+
+// Compile-time check that PeriodicReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&PeriodicReader{}: {}}
+
+// newTicker allows testing override.
+var newTicker = time.NewTicker
+
+// run continuously collects and exports metric data at the specified
+// interval. This will run until ctx is canceled or times out.
+func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) {
+ ticker := newTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ err := r.collectAndExport(ctx)
+ if err != nil {
+ otel.Handle(err)
+ }
+ case errCh := <-r.flushCh:
+ errCh <- r.collectAndExport(ctx)
+ ticker.Reset(interval)
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+// register registers p as the producer of this reader.
+func (r *PeriodicReader) register(p sdkProducer) {
+ // Only register once. If producer is already set, do nothing.
+ if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+ msg := "did not register periodic reader"
+ global.Error(errDuplicateRegister, msg)
+ }
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality {
+ return r.exporter.Temporality(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (r *PeriodicReader) aggregation(
+ kind InstrumentKind,
+) Aggregation { // nolint:revive // import-shadow for method scoped by type.
+ return r.exporter.Aggregation(kind)
+}
+
+// collectAndExport gathers all metric data related to the PeriodicReader r
+// from the SDK and exports it with r's exporter.
+func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
+ ctx, cancel := context.WithTimeoutCause(ctx, r.timeout, errors.New("reader collect and export timeout"))
+ defer cancel()
+
+	// rm is reused from a sync.Pool rather than allocated on every collection (#3047).
+ rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
+ err := r.Collect(ctx, rm)
+ if err == nil {
+ err = r.export(ctx, rm)
+ }
+ r.rmPool.Put(rm)
+ return err
+}
+
+// Collect gathers all metric data related to the Reader from
+// the SDK and other Producers and stores the result in rm. The metric
+// data is not exported to the configured exporter, it is left to the caller to
+// handle that if desired.
+//
+// Collect will return an error if called after shutdown.
+// Collect will return an error if rm is a nil ResourceMetrics.
+// Collect will return an error if the context's Done channel is closed.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if rm == nil {
+ return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
+ }
+ return r.collect(ctx, r.sdkProducer.Load(), rm)
+}
+
+// collect unwraps p as a produceHolder and returns its produce results.
+func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error {
+ if p == nil {
+ return ErrReaderNotRegistered
+ }
+
+ ph, ok := p.(produceHolder)
+ if !ok {
+		// The atomic.Value is entirely in the PeriodicReader's control, so this
+		// should never happen. In the unforeseen case that it does, return an
+		// error instead of panicking so a user's code does not halt in the
+		// process.
+ err := fmt.Errorf("periodic reader: invalid producer: %T", p)
+ return err
+ }
+
+ err := ph.produce(ctx, rm)
+ if err != nil {
+ return err
+ }
+ for _, producer := range r.externalProducers.Load().([]Producer) {
+ externalMetrics, e := producer.Produce(ctx)
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
+ }
+
+ global.Debug("PeriodicReader collection", "Data", rm)
+
+ return err
+}
+
+// export exports metric data m using r's exporter.
+func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error {
+ return r.exporter.Export(ctx, m)
+}
+
+// ForceFlush flushes pending telemetry.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
+ // Prioritize the ctx timeout if it is set.
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader force flush timeout"))
+ defer cancel()
+ }
+
+ errCh := make(chan error, 1)
+ select {
+ case r.flushCh <- errCh:
+ select {
+ case err := <-errCh:
+ if err != nil {
+ return err
+ }
+ close(errCh)
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ case <-r.done:
+ return ErrReaderShutdown
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ return r.exporter.ForceFlush(ctx)
+}
+
+// Shutdown flushes pending telemetry and then stops the export pipeline.
+//
+// This method is safe to call concurrently.
+func (r *PeriodicReader) Shutdown(ctx context.Context) error {
+ err := ErrReaderShutdown
+ r.shutdownOnce.Do(func() {
+ // Prioritize the ctx timeout if it is set.
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeoutCause(ctx, r.timeout, errors.New("reader shutdown timeout"))
+ defer cancel()
+ }
+
+ // Stop the run loop.
+ r.cancel()
+ <-r.done
+
+ // Any future call to Collect will now return ErrReaderShutdown.
+ ph := r.sdkProducer.Swap(produceHolder{
+ produce: shutdownProducer{}.produce,
+ })
+
+ if ph != nil { // Reader was registered.
+ // Flush pending telemetry.
+ m := r.rmPool.Get().(*metricdata.ResourceMetrics)
+ err = r.collect(ctx, ph, m)
+ if err == nil {
+ err = r.export(ctx, m)
+ }
+ r.rmPool.Put(m)
+ }
+
+ sErr := r.exporter.Shutdown(ctx)
+ if err == nil || errors.Is(err, ErrReaderShutdown) {
+ err = sErr
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.isShutdown = true
+ // release references to Producer(s)
+ r.externalProducers.Store([]Producer{})
+ })
+ return err
+}
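+
+// A sketch of a bounded shutdown; a deadline on the passed context takes
+// precedence over the reader's configured timeout:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	_ = reader.Shutdown(ctx)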
+
+// MarshalLog returns logging data about the PeriodicReader.
+func (r *PeriodicReader) MarshalLog() any {
+ r.mu.Lock()
+ down := r.isShutdown
+ r.mu.Unlock()
+ return struct {
+ Type string
+ Exporter Exporter
+ Registered bool
+ Shutdown bool
+ Interval time.Duration
+ Timeout time.Duration
+ }{
+ Type: "PeriodicReader",
+ Exporter: r.exporter,
+ Registered: r.sdkProducer.Load() != nil,
+ Shutdown: down,
+ Interval: r.interval,
+ Timeout: r.timeout,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
new file mode 100644
index 000000000..408fddc8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -0,0 +1,678 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "container/list"
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/internal"
+ "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+)
+
+var (
+ errCreatingAggregators = errors.New("could not create all aggregators")
+ errIncompatibleAggregation = errors.New("incompatible aggregation")
+ errUnknownAggregation = errors.New("unrecognized aggregation")
+)
+
+// instrumentSync is a synchronization point between a pipeline and an
+// instrument's aggregate function.
+type instrumentSync struct {
+ name string
+ description string
+ unit string
+ compAgg aggregate.ComputeAggregation
+}
+
+func newPipeline(
+ res *resource.Resource,
+ reader Reader,
+ views []View,
+ exemplarFilter exemplar.Filter,
+ cardinalityLimit int,
+) *pipeline {
+ if res == nil {
+ res = resource.Empty()
+ }
+ return &pipeline{
+ resource: res,
+ reader: reader,
+ views: views,
+ int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{},
+ float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{},
+ exemplarFilter: exemplarFilter,
+ cardinalityLimit: cardinalityLimit,
+ // aggregations is lazy allocated when needed.
+ }
+}
+
+// pipeline connects all of the instruments created by a meter provider to a Reader.
+// This is the object that will be registered with Reader.register() when a
+// meter provider is created.
+//
+// As each instrument is created it should be checked against the views of
+// the Reader, and each aggregate function from a matching view should be
+// added to the pipeline.
+type pipeline struct {
+ resource *resource.Resource
+
+ reader Reader
+ views []View
+
+ sync.Mutex
+ int64Measures map[observableID[int64]][]aggregate.Measure[int64]
+ float64Measures map[observableID[float64]][]aggregate.Measure[float64]
+ aggregations map[instrumentation.Scope][]instrumentSync
+ callbacks []func(context.Context) error
+ multiCallbacks list.List
+ exemplarFilter exemplar.Filter
+ cardinalityLimit int
+}
+
+// addInt64Measure adds a new int64 measure to the pipeline for each observer.
+func (p *pipeline) addInt64Measure(id observableID[int64], m []aggregate.Measure[int64]) {
+ p.Lock()
+ defer p.Unlock()
+ p.int64Measures[id] = m
+}
+
+// addFloat64Measure adds a new float64 measure to the pipeline for each observer.
+func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Measure[float64]) {
+ p.Lock()
+ defer p.Unlock()
+ p.float64Measures[id] = m
+}
+
+// addSync adds the instrumentSync to pipeline p with scope. This method is not
+// idempotent. Duplicate calls will result in duplicate additions; it is the
+// caller's responsibility to ensure this is called with unique values.
+func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
+ p.Lock()
+ defer p.Unlock()
+ if p.aggregations == nil {
+ p.aggregations = map[instrumentation.Scope][]instrumentSync{
+ scope: {iSync},
+ }
+ return
+ }
+ p.aggregations[scope] = append(p.aggregations[scope], iSync)
+}
+
+type multiCallback func(context.Context) error
+
+// addMultiCallback registers a multi-instrument callback to be run when
+// `produce()` is called.
+func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
+ p.Lock()
+ defer p.Unlock()
+ e := p.multiCallbacks.PushBack(c)
+ return func() {
+ p.Lock()
+ p.multiCallbacks.Remove(e)
+ p.Unlock()
+ }
+}
+
+// produce returns aggregated metrics from a single collection.
+//
+// This method is safe to call concurrently.
+func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+	// Only check if the context is already canceled before starting, not inside or after callback loops.
+ // If this method returns after executing some callbacks but before running all aggregations,
+ // internal aggregation state can be corrupted and result in incorrect data returned
+ // by future produce calls.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ p.Lock()
+ defer p.Unlock()
+
+ var err error
+ for _, c := range p.callbacks {
+ // TODO make the callbacks parallel. ( #3034 )
+ if e := c(ctx); e != nil {
+ err = errors.Join(err, e)
+ }
+ }
+ for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
+ // TODO make the callbacks parallel. ( #3034 )
+ f := e.Value.(multiCallback)
+ if e := f(ctx); e != nil {
+ err = errors.Join(err, e)
+ }
+ }
+
+ rm.Resource = p.resource
+ rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations))
+
+ i := 0
+ for scope, instruments := range p.aggregations {
+ rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments))
+ j := 0
+ for _, inst := range instruments {
+ data := rm.ScopeMetrics[i].Metrics[j].Data
+ if n := inst.compAgg(&data); n > 0 {
+ rm.ScopeMetrics[i].Metrics[j].Name = inst.name
+ rm.ScopeMetrics[i].Metrics[j].Description = inst.description
+ rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit
+ rm.ScopeMetrics[i].Metrics[j].Data = data
+ j++
+ }
+ }
+ rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j]
+ if len(rm.ScopeMetrics[i].Metrics) > 0 {
+ rm.ScopeMetrics[i].Scope = scope
+ i++
+ }
+ }
+
+ rm.ScopeMetrics = rm.ScopeMetrics[:i]
+
+ return err
+}
+
+// inserter facilitates inserting of new instruments from a single scope into a
+// pipeline.
+type inserter[N int64 | float64] struct {
+	// aggregators is a cache that holds aggregate function inputs whose
+	// outputs have been inserted into the underlying reader pipeline. This
+	// cache ensures no duplicate aggregate functions are inserted into the
+	// reader pipeline and that, if a new request during instrument creation
+	// asks for the same aggregate function input, the same instance is
+	// returned.
+ aggregators *cache[instID, aggVal[N]]
+
+	// views is a cache that holds instrument identifiers for all the
+	// instruments a Meter has created; it is provided by the Meter that owns
+	// this inserter. This cache ensures that, during the creation of
+	// instruments with the same name but different options (e.g. description,
+	// unit), a warning message is logged.
+ views *cache[string, instID]
+
+ pipeline *pipeline
+}
+
+func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] {
+ if vc == nil {
+ vc = &cache[string, instID]{}
+ }
+ return &inserter[N]{
+ aggregators: &cache[instID, aggVal[N]]{},
+ views: vc,
+ pipeline: p,
+ }
+}
+
+// Instrument inserts the instrument inst with instUnit into a pipeline. All
+// views the pipeline contains are matched against, and any matching view that
+// creates a unique aggregate function will have its output inserted into the
+// pipeline and its input included in the returned slice.
+//
+// The returned aggregate function inputs are ensured to be deduplicated and
+// unique. If another view in another pipeline that is cached by this
+// inserter's cache has already inserted the same aggregate function for the
+// same instrument, that function's input instance is returned.
+//
+// If another instrument has already been inserted by this inserter, or any
+// other using the same cache, and it conflicts with the instrument being
+// inserted in this call, an aggregate function input matching the arguments
+// will still be returned but an Info level log message will also be logged to
+// the OTel global logger.
+//
+// If the passed instrument would result in an incompatible aggregate function,
+// an error is returned and that aggregate function output is not inserted nor
+// is its input returned.
+//
+// If an instrument is determined to use a Drop aggregation, that instrument is
+// not inserted nor returned.
+func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) {
+ var (
+ matched bool
+ measures []aggregate.Measure[N]
+ )
+
+ var err error
+ seen := make(map[uint64]struct{})
+ for _, v := range i.pipeline.views {
+ stream, match := v(inst)
+ if !match {
+ continue
+ }
+ matched = true
+ in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ if in == nil { // Drop aggregation.
+ continue
+ }
+ if _, ok := seen[id]; ok {
+ // This aggregate function has already been added.
+ continue
+ }
+ seen[id] = struct{}{}
+ measures = append(measures, in)
+ }
+
+ if err != nil {
+ err = errors.Join(errCreatingAggregators, err)
+ }
+
+ if matched {
+ return measures, err
+ }
+
+	// Apply the implicit default view if no explicit view matched.
+ stream := Stream{
+ Name: inst.Name,
+ Description: inst.Description,
+ Unit: inst.Unit,
+ }
+ in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
+ if e != nil {
+ if err == nil {
+ err = errCreatingAggregators
+ }
+ err = errors.Join(err, e)
+ }
+ if in != nil {
+		// Guaranteed not to have been seen, given matched was false.
+ measures = append(measures, in)
+ }
+ return measures, err
+}
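
A minimal sketch of the deduplication behavior described above, assuming only the public go.opentelemetry.io/otel/sdk/metric API (the instrument and view names are illustrative): registering the same View twice still yields a single metric stream, because both matches resolve to the same cached aggregate function.

package main

import (
	"context"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()
	reader := sdkmetric.NewManualReader()
	v := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "latency"},
		sdkmetric.Stream{Name: "latency.ms"},
	)
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(v, v), // duplicate view on purpose
	)
	defer func() { _ = mp.Shutdown(ctx) }()

	hist, _ := mp.Meter("example").Float64Histogram("latency")
	hist.Record(ctx, 12.5)

	var rm metricdata.ResourceMetrics
	_ = reader.Collect(ctx, &rm)
	// The seen map deduplicates the aggregate function, so only one
	// "latency.ms" stream is produced despite two matching views.
	fmt.Println(len(rm.ScopeMetrics[0].Metrics)) // 1
}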
+
+// addCallback registers a single instrument callback to be run when
+// `produce()` is called.
+func (i *inserter[N]) addCallback(cback func(context.Context) error) {
+ i.pipeline.Lock()
+ defer i.pipeline.Unlock()
+ i.pipeline.callbacks = append(i.pipeline.callbacks, cback)
+}
+
+var aggIDCount uint64
+
+// aggVal is the cached value in an aggregators cache.
+type aggVal[N int64 | float64] struct {
+ ID uint64
+ Measure aggregate.Measure[N]
+ Err error
+}
+
+// readerDefaultAggregation returns the default aggregation for the instrument
+// kind based on the reader's aggregation preferences. This is used unless the
+// aggregation is overridden with a view.
+func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation {
+ aggregation := i.pipeline.reader.aggregation(kind)
+ switch aggregation.(type) {
+ case nil, AggregationDefault:
+ // If the reader returns default or nil use the default selector.
+ aggregation = DefaultAggregationSelector(kind)
+ default:
+ // Deep copy and validate before using.
+ aggregation = aggregation.copy()
+ if err := aggregation.err(); err != nil {
+ orig := aggregation
+ aggregation = DefaultAggregationSelector(kind)
+ global.Error(
+ err, "using default aggregation instead",
+ "aggregation", orig,
+ "replacement", aggregation,
+ )
+ }
+ }
+ return aggregation
+}
+
+// cachedAggregator returns the appropriate aggregate input and output
+// functions for an instrument configuration. If the exact instrument has been
+// created within the inst.Scope, those aggregate function instances will be
+// returned. Otherwise, new computed aggregate functions will be cached and
+// returned.
+//
+// If the instrument configuration conflicts with an instrument that has
+// already been created (e.g. description, unit, data type) a warning will be
+// logged at the "Info" level with the global OTel logger. Valid new aggregate
+// functions for the instrument configuration will still be returned without an
+// error.
+//
+// If the instrument defines an unknown or incompatible aggregation, an error
+// is returned.
+func (i *inserter[N]) cachedAggregator(
+ scope instrumentation.Scope,
+ kind InstrumentKind,
+ stream Stream,
+ readerAggregation Aggregation,
+) (meas aggregate.Measure[N], aggID uint64, err error) {
+ switch stream.Aggregation.(type) {
+ case nil:
+ // The aggregation was not overridden with a view. Use the aggregation
+ // provided by the reader.
+ stream.Aggregation = readerAggregation
+ case AggregationDefault:
+ // The view explicitly requested the default aggregation.
+ stream.Aggregation = DefaultAggregationSelector(kind)
+ }
+ if stream.ExemplarReservoirProviderSelector == nil {
+ stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector
+ }
+
+ if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
+ return nil, 0, fmt.Errorf(
+ "creating aggregator with instrumentKind: %d, aggregation %v: %w",
+ kind, stream.Aggregation, err,
+ )
+ }
+
+ id := i.instID(kind, stream)
+ // If there is a conflict, the specification says the view should
+ // still be applied and a warning should be logged.
+ i.logConflict(id)
+
+	// If there are requests for the same instrument with different name
+	// casing, the first-seen instance needs to be returned. Use a normalized
+	// ID for the cache lookup to ensure the correct comparison.
+ normID := id.normalize()
+ cv := i.aggregators.Lookup(normID, func() aggVal[N] {
+ b := aggregate.Builder[N]{
+ Temporality: i.pipeline.reader.temporality(kind),
+ ReservoirFunc: reservoirFunc[N](
+ stream.ExemplarReservoirProviderSelector(stream.Aggregation),
+ i.pipeline.exemplarFilter,
+ ),
+ }
+ b.Filter = stream.AttributeFilter
+		// A value less than or equal to zero will disable the aggregation
+		// limits for the builder (and all the created aggregates).
+		// cardinalityLimit will be 0 by default if unset (or on
+		// unrecognized input). Use that value directly.
+ b.AggregationLimit = i.pipeline.cardinalityLimit
+ in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
+ if err != nil {
+ return aggVal[N]{0, nil, err}
+ }
+ if in == nil { // Drop aggregator.
+ return aggVal[N]{0, nil, nil}
+ }
+ i.pipeline.addSync(scope, instrumentSync{
+ // Use the first-seen name casing for this and all subsequent
+ // requests of this instrument.
+ name: stream.Name,
+ description: stream.Description,
+ unit: stream.Unit,
+ compAgg: out,
+ })
+ id := atomic.AddUint64(&aggIDCount, 1)
+ return aggVal[N]{id, in, err}
+ })
+ return cv.Measure, cv.ID, cv.Err
+}
+
+// logConflict validates if an instrument with the same case-insensitive name
+// as id has already been created. If that instrument conflicts with id, a
+// warning is logged.
+func (i *inserter[N]) logConflict(id instID) {
+	// The API specification defines names as case-insensitive. Names that
+	// differ only in casing therefore need to be treated as conflicts.
+ name := id.normalize().Name
+ existing := i.views.Lookup(name, func() instID { return id })
+ if id == existing {
+ return
+ }
+
+ const msg = "duplicate metric stream definitions"
+ args := []any{
+ "names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
+ "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
+ "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),
+ "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit),
+ "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number),
+ }
+
+ // The specification recommends logging a suggested view to resolve
+ // conflicts if possible.
+ //
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration
+ if id.Unit != existing.Unit || id.Number != existing.Number {
+ // There is no view resolution for these, don't make a suggestion.
+ global.Warn(msg, args...)
+ return
+ }
+
+ var stream string
+ if id.Name != existing.Name || id.Kind != existing.Kind {
+ stream = `Stream{Name: "{{NEW_NAME}}"}`
+ } else if id.Description != existing.Description {
+ stream = fmt.Sprintf("Stream{Description: %q}", existing.Description)
+ }
+
+ inst := fmt.Sprintf(
+ "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
+ id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
+ )
+ args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream))
+
+ global.Warn(msg, args...)
+}
+
+func (*inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
+ var zero N
+ return instID{
+ Name: stream.Name,
+ Description: stream.Description,
+ Unit: stream.Unit,
+ Kind: kind,
+ Number: fmt.Sprintf("%T", zero),
+ }
+}
+
+// aggregateFunc returns new aggregate functions matching agg and kind. If the
+// agg is unknown or the temporality is invalid, an error is returned.
+func (i *inserter[N]) aggregateFunc(
+ b aggregate.Builder[N],
+ agg Aggregation,
+ kind InstrumentKind,
+) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
+ switch a := agg.(type) {
+ case AggregationDefault:
+ return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind)
+ case AggregationDrop:
+ // Return nil in and out to signify the drop aggregator.
+ case AggregationLastValue:
+ switch kind {
+ case InstrumentKindGauge:
+ meas, comp = b.LastValue()
+ case InstrumentKindObservableGauge:
+ meas, comp = b.PrecomputedLastValue()
+ }
+ case AggregationSum:
+ switch kind {
+ case InstrumentKindObservableCounter:
+ meas, comp = b.PrecomputedSum(true)
+ case InstrumentKindObservableUpDownCounter:
+ meas, comp = b.PrecomputedSum(false)
+ case InstrumentKindCounter, InstrumentKindHistogram:
+ meas, comp = b.Sum(true)
+ default:
+ // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and
+ // instrumentKindUndefined or other invalid instrument kinds.
+ meas, comp = b.Sum(false)
+ }
+ case AggregationExplicitBucketHistogram:
+ var noSum bool
+ switch kind {
+ case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+ // The sum should not be collected for any instrument that can make
+ // negative measurements:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+ noSum = true
+ }
+ meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum)
+ case AggregationBase2ExponentialHistogram:
+ var noSum bool
+ switch kind {
+ case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+ // The sum should not be collected for any instrument that can make
+ // negative measurements:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
+ noSum = true
+ }
+ meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum)
+
+ default:
+ err = errUnknownAggregation
+ }
+
+ return meas, comp, err
+}
+
+// isAggregatorCompatible checks if the aggregation can be used by the instrument.
+// Current compatibility:
+//
+// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram |
+// |--------------------------|------|-----------|-----|-----------|-----------------------|
+// | Counter | ✓ | | ✓ | ✓ | ✓ |
+// | UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
+// | Histogram | ✓ | | ✓ | ✓ | ✓ |
+// | Gauge | ✓ | ✓ | | ✓ | ✓ |
+// | Observable Counter | ✓ | | ✓ | ✓ | ✓ |
+// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
+// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |.
+func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
+ switch agg.(type) {
+ case AggregationDefault:
+ return nil
+ case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram:
+ switch kind {
+ case InstrumentKindCounter,
+ InstrumentKindUpDownCounter,
+ InstrumentKindHistogram,
+ InstrumentKindGauge,
+ InstrumentKindObservableCounter,
+ InstrumentKindObservableUpDownCounter,
+ InstrumentKindObservableGauge:
+ return nil
+ default:
+ return errIncompatibleAggregation
+ }
+ case AggregationSum:
+ switch kind {
+ case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
+ return nil
+ default:
+ // TODO: review need for aggregation check after
+ // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+ return errIncompatibleAggregation
+ }
+ case AggregationLastValue:
+ switch kind {
+ case InstrumentKindObservableGauge, InstrumentKindGauge:
+ return nil
+ }
+ // TODO: review need for aggregation check after
+ // https://github.com/open-telemetry/opentelemetry-specification/issues/2710
+ return errIncompatibleAggregation
+ case AggregationDrop:
+ return nil
+ default:
+		// This is reached after checking for the default aggregation; any
+		// aggregation unknown at this point is an error.
+ return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
+ }
+}
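
A hedged sketch of how an incompatible pairing from the table above surfaces through the public API; the view below asks for LastValue on a Counter, so aggregator creation fails and the instrument constructor returns an error (names are illustrative):

package main

import (
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	badView := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "hits"},
		sdkmetric.Stream{Aggregation: sdkmetric.AggregationLastValue{}},
	)
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewManualReader()),
		sdkmetric.WithView(badView),
	)
	// LastValue is not compatible with Counter (see the table above), so
	// aggregator creation fails and an error is returned here.
	_, err := mp.Meter("example").Int64Counter("hits")
	fmt.Println(err != nil) // true
}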
+
+// pipelines is the group of pipelines connecting Readers with instrument
+// measurement.
+type pipelines []*pipeline
+
+func newPipelines(
+ res *resource.Resource,
+ readers []Reader,
+ views []View,
+ exemplarFilter exemplar.Filter,
+ cardinalityLimit int,
+) pipelines {
+ pipes := make([]*pipeline, 0, len(readers))
+ for _, r := range readers {
+ p := newPipeline(res, r, views, exemplarFilter, cardinalityLimit)
+ r.register(p)
+ pipes = append(pipes, p)
+ }
+ return pipes
+}
+
+type unregisterFuncs struct {
+ embedded.Registration
+ f []func()
+}
+
+func (u unregisterFuncs) Unregister() error {
+ for _, f := range u.f {
+ f()
+ }
+ return nil
+}
+
+// resolver facilitates resolving the aggregate functions an instrument uses
+// to aggregate measurements, while updating all pipelines that need to pull
+// from those aggregations.
+type resolver[N int64 | float64] struct {
+ inserters []*inserter[N]
+}
+
+func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] {
+ in := make([]*inserter[N], len(p))
+ for i := range in {
+ in[i] = newInserter[N](p[i], vc)
+ }
+ return resolver[N]{in}
+}
+
+// Aggregators returns the Aggregators that must be updated by the instrument
+// defined by key.
+func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
+ var measures []aggregate.Measure[N]
+
+ var err error
+ for _, i := range r.inserters {
+ in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ measures = append(measures, in...)
+ }
+ return measures, err
+}
+
+// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
+// defined by key. If boundaries were provided on instrument instantiation, those take precedence
+// over boundaries provided by the reader.
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
+ var measures []aggregate.Measure[N]
+
+ var err error
+ for _, i := range r.inserters {
+ agg := i.readerDefaultAggregation(id.Kind)
+ if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
+ histAgg.Boundaries = boundaries
+ agg = histAgg
+ }
+ in, e := i.Instrument(id, agg)
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ measures = append(measures, in...)
+ }
+ return measures, err
+}
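
Because newPipelines builds one pipeline per Reader and the resolver gathers measures from every inserter, a single measurement updates all registered Readers. A short sketch of that fan-out, assuming only the public API (names illustrative):

package main

import (
	"context"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()
	r1 := sdkmetric.NewManualReader()
	r2 := sdkmetric.NewManualReader()
	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(r1),
		sdkmetric.WithReader(r2),
	)
	defer func() { _ = mp.Shutdown(ctx) }()

	ctr, _ := mp.Meter("example").Int64Counter("requests")
	ctr.Add(ctx, 5) // one measurement, aggregated once per pipeline

	var rm1, rm2 metricdata.ResourceMetrics
	_ = r1.Collect(ctx, &rm1)
	_ = r2.Collect(ctx, &rm2)
	// Both readers observe the same data independently.
	fmt.Println(len(rm1.ScopeMetrics), len(rm2.ScopeMetrics)) // 1 1
}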
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
new file mode 100644
index 000000000..b0a6ec580
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
@@ -0,0 +1,145 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+ "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+)
+
+// MeterProvider handles the creation and coordination of Meters. All Meters
+// created by a MeterProvider will be associated with the same Resource, have
+// the same Views applied to them, and have their produced metric telemetry
+// passed to the configured Readers.
+type MeterProvider struct {
+ embedded.MeterProvider
+
+ pipes pipelines
+ meters cache[instrumentation.Scope, *meter]
+
+ forceFlush, shutdown func(context.Context) error
+ stopped atomic.Bool
+}
+
+// Compile-time check MeterProvider implements metric.MeterProvider.
+var _ metric.MeterProvider = (*MeterProvider)(nil)
+
+// NewMeterProvider returns a new and configured MeterProvider.
+//
+// By default, the returned MeterProvider is configured with the default
+// Resource and no Readers. Readers cannot be added after a MeterProvider is
+// created. This means a MeterProvider created with no Readers will perform
+// no operations.
+func NewMeterProvider(options ...Option) *MeterProvider {
+ conf := newConfig(options)
+ flush, sdown := conf.readerSignals()
+
+ mp := &MeterProvider{
+ pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter, conf.cardinalityLimit),
+ forceFlush: flush,
+ shutdown: sdown,
+ }
+	// Log after creation so all readers correctly show they are registered.
+ global.Info("MeterProvider created",
+ "Resource", conf.res,
+ "Readers", conf.readers,
+ "Views", len(conf.views),
+ )
+ return mp
+}
+
+// Meter returns a Meter with the given name and configured with options.
+//
+// The name should be the name of the instrumentation scope creating
+// telemetry. This name may be the same as the instrumented code only if that
+// code provides built-in instrumentation.
+//
+// Calls to the Meter method after Shutdown has been called will return Meters
+// that perform no operations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
+ if name == "" {
+ global.Warn("Invalid Meter name.", "name", name)
+ }
+
+ if mp.stopped.Load() {
+ return noop.Meter{}
+ }
+
+ c := metric.NewMeterConfig(options...)
+ s := instrumentation.Scope{
+ Name: name,
+ Version: c.InstrumentationVersion(),
+ SchemaURL: c.SchemaURL(),
+ Attributes: c.InstrumentationAttributes(),
+ }
+
+ global.Info("Meter created",
+ "Name", s.Name,
+ "Version", s.Version,
+ "SchemaURL", s.SchemaURL,
+ "Attributes", s.Attributes,
+ )
+
+ return mp.meters.Lookup(s, func() *meter {
+ return newMeter(s, mp.pipes)
+ })
+}
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources will have been released in
+// these situations.
+//
+// ForceFlush calls ForceFlush(context.Context) error
+// on all Readers that implement this method.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+ if mp.forceFlush != nil {
+ return mp.forceFlush(ctx)
+ }
+ return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources will have been released in
+// these situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+ // Even though it may seem like there is a synchronization issue between the
+ // call to `Store` and checking `shutdown`, the Go concurrency model ensures
+ // that is not the case, as all the atomic operations executed in a program
+ // behave as though executed in some sequentially consistent order. This
+ // definition provides the same semantics as C++'s sequentially consistent
+ // atomics and Java's volatile variables.
+ // See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
+
+ mp.stopped.Store(true)
+ if mp.shutdown != nil {
+ return mp.shutdown(ctx)
+ }
+ return nil
+}
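
A small sketch of the meters cache, assuming the public API: Meter calls with an identical instrumentation scope (name, version, schema URL, attributes) return the cached instance, while any differing field produces a new Meter.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	mp := sdkmetric.NewMeterProvider()
	m1 := mp.Meter("svc", metric.WithInstrumentationVersion("v1"))
	m2 := mp.Meter("svc", metric.WithInstrumentationVersion("v1"))
	m3 := mp.Meter("svc", metric.WithInstrumentationVersion("v2"))
	// Identical scopes hit the cache; a different version is a new scope.
	fmt.Println(m1 == m2, m1 == m3) // true false
}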
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
new file mode 100644
index 000000000..5c1cea825
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -0,0 +1,192 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// errDuplicateRegister is logged by a Reader when an attempt is made to
+// register it more than once.
+var errDuplicateRegister = errors.New("duplicate reader registration")
+
+// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
+// the reader is registered with a MeterProvider.
+var ErrReaderNotRegistered = errors.New("reader is not registered")
+
+// ErrReaderShutdown is returned if Collect or Shutdown are called after a
+// reader has been Shutdown once.
+var ErrReaderShutdown = errors.New("reader is shutdown")
+
+// errNonPositiveDuration is logged when an environment variable
+// has a non-positive value.
+var errNonPositiveDuration = errors.New("non-positive duration")
+
+// Reader is the interface used between the SDK and an
+// exporter. Control flow is bi-directional through the
+// Reader, since the SDK initiates ForceFlush and Shutdown
+// while the exporter initiates collection. The Register() method here
+// informs the Reader that it can begin reading, signaling the
+// start of bi-directional control flow.
+//
+// Typically, push-based exporters that are periodic will
+// implement PeriodicExporter themselves and construct a
+// PeriodicReader to satisfy this interface.
+//
+// Pull-based exporters will typically implement Register
+// themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Reader interface {
+ // register registers a Reader with a MeterProvider.
+ // The producer argument allows the Reader to signal the sdk to collect
+ // and send aggregated metric measurements.
+ register(sdkProducer)
+
+ // temporality reports the Temporality for the instrument kind provided.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Reader methods.
+ temporality(InstrumentKind) metricdata.Temporality
+
+ // aggregation returns what Aggregation to use for an instrument kind.
+ //
+ // This method needs to be concurrent safe with itself and all the other
+ // Reader methods.
+ aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
+
+ // Collect gathers and returns all metric data related to the Reader from
+ // the SDK and stores it in rm. An error is returned if this is called
+ // after Shutdown or if rm is nil.
+ //
+ // This method needs to be concurrent safe, and the cancellation of the
+ // passed context is expected to be honored.
+ Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown flushes all metric measurements held in an export pipeline and releases any
+ // held computational resources.
+ //
+	// The deadline or cancellation of the passed context is honored. An
+	// appropriate error will be returned in these situations. There is no
+	// guarantee that all telemetry will be flushed or all resources will
+	// have been released in these situations.
+ //
+ // After Shutdown is called, calls to Collect will perform no operation and instead will return
+ // an error indicating the shutdown state.
+ //
+ // This method needs to be concurrent safe.
+ Shutdown(context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// sdkProducer produces metrics for a Reader.
+type sdkProducer interface {
+ // produce returns aggregated metrics from a single collection.
+ //
+ // This method is safe to call concurrently.
+ produce(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// Producer produces metrics for a Reader from an external source.
+type Producer interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Produce returns aggregated metrics from an external source.
+ //
+ // This method should be safe to call concurrently.
+ Produce(context.Context) ([]metricdata.ScopeMetrics, error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// produceHolder is used as an atomic.Value to wrap the non-concrete producer
+// type.
+type produceHolder struct {
+ produce func(context.Context, *metricdata.ResourceMetrics) error
+}
+
+// shutdownProducer produces an ErrReaderShutdown error always.
+type shutdownProducer struct{}
+
+// produce returns an ErrReaderShutdown error.
+func (shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
+ return ErrReaderShutdown
+}
+
+// TemporalitySelector selects the temporality to use based on the InstrumentKind.
+type TemporalitySelector func(InstrumentKind) metricdata.Temporality
+
+// DefaultTemporalitySelector is the default TemporalitySelector used if
+// WithTemporalitySelector is not provided. CumulativeTemporality will be used
+// for all instrument kinds if this TemporalitySelector is used.
+func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
+ return metricdata.CumulativeTemporality
+}
+
+// AggregationSelector selects the aggregation and the parameters to use for
+// that aggregation based on the InstrumentKind.
+//
+// If the Aggregation returned is nil or DefaultAggregation, the selection from
+// DefaultAggregationSelector will be used.
+type AggregationSelector func(InstrumentKind) Aggregation
+
+// DefaultAggregationSelector returns the default aggregation and parameters
+// that will be used to summarize measurements made from an instrument of
+// InstrumentKind. This AggregationSelector uses the following selection
+// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
+// Observable UpDownCounter ⇨ Sum, Gauge ⇨ LastValue,
+// Observable Gauge ⇨ LastValue, Histogram ⇨ ExplicitBucketHistogram.
+func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
+ switch ik {
+ case InstrumentKindCounter,
+ InstrumentKindUpDownCounter,
+ InstrumentKindObservableCounter,
+ InstrumentKindObservableUpDownCounter:
+ return AggregationSum{}
+ case InstrumentKindObservableGauge, InstrumentKindGauge:
+ return AggregationLastValue{}
+ case InstrumentKindHistogram:
+ return AggregationExplicitBucketHistogram{
+ Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
+ NoMinMax: false,
+ }
+ }
+ panic("unknown instrument kind")
+}
+
+// ReaderOption is an option which can be applied to manual or Periodic
+// readers.
+type ReaderOption interface {
+ PeriodicReaderOption
+ ManualReaderOption
+}
+
+// WithProducer registers p as an external Producer of metric data
+// for this Reader.
+func WithProducer(p Producer) ReaderOption {
+ return producerOption{p: p}
+}
+
+type producerOption struct {
+ p Producer
+}
+
+// applyManual returns a manualReaderConfig with option applied.
+func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
+ c.producers = append(c.producers, o.p)
+ return c
+}
+
+// applyPeriodic returns a periodicReaderConfig with option applied.
+func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
+ c.producers = append(c.producers, o.p)
+ return c
+}
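
A sketch of bridging externally produced metrics through WithProducer; the bridge type, scope name, and gauge value below are illustrative assumptions, not part of the SDK:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// bridge is a hypothetical external source satisfying the Producer interface.
type bridge struct{}

func (bridge) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
	return []metricdata.ScopeMetrics{{
		Scope: instrumentation.Scope{Name: "external.bridge"},
		Metrics: []metricdata.Metrics{{
			Name: "external.value",
			Data: metricdata.Gauge[int64]{
				DataPoints: []metricdata.DataPoint[int64]{{Value: 42}},
			},
		}},
	}}, nil
}

func main() {
	reader := sdkmetric.NewManualReader(sdkmetric.WithProducer(bridge{}))
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = mp.Shutdown(context.Background()) }()

	var rm metricdata.ResourceMetrics
	_ = reader.Collect(context.Background(), &rm)
	// Collect appends the externally produced scope metrics.
	fmt.Println(len(rm.ScopeMetrics)) // 1
}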
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
new file mode 100644
index 000000000..dd9051a76
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+// version is the current release version of the metric SDK in use.
+func version() string {
+ return "1.38.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
new file mode 100644
index 000000000..630890f42
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+var (
+ errMultiInst = errors.New("name replacement for multiple instruments")
+ errEmptyView = errors.New("no criteria provided for view")
+
+ emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
+)
+
+// View is an override to the default behavior of the SDK. It defines how data
+// should be collected for certain instruments. It returns true and the exact
+// Stream to use for matching Instruments. Otherwise, if the view does not
+// match, false is returned.
+type View func(Instrument) (Stream, bool)
+
+// NewView returns a View that applies the Stream mask for all instruments that
+// match criteria. The returned View will only apply mask if all non-zero-value
+// fields of criteria match the corresponding Instrument passed to the view. If
+// no criteria are provided (all fields of criteria are their zero-values), a
+// view that matches no instruments is returned. If you need to match a
+// zero-value field, create a View directly.
+//
+// The Name field of criteria supports wildcard pattern matching. The "*"
+// wildcard is recognized as matching zero or more characters, and "?" is
+// recognized as matching exactly one character. For example, a pattern of "*"
+// matches all instrument names.
+//
+// The Stream mask only applies updates for non-zero-value fields. By default,
+// the Instrument the View matches against will be used for the Name,
+// Description, and Unit of the returned Stream, and no Aggregation or
+// AttributeFilter is set. All non-zero-value fields of mask are used instead
+// of the default. If you need to zero out a Stream field returned from a
+// View, create a View directly.
+func NewView(criteria Instrument, mask Stream) View {
+ if criteria.IsEmpty() {
+ global.Error(
+ errEmptyView, "dropping view",
+ "mask", mask,
+ )
+ return emptyView
+ }
+
+ var matchFunc func(Instrument) bool
+ if strings.ContainsAny(criteria.Name, "*?") {
+ if mask.Name != "" {
+ global.Error(
+ errMultiInst, "dropping view",
+ "criteria", criteria,
+ "mask", mask,
+ )
+ return emptyView
+ }
+
+ // Handle branching here in NewView instead of criteria.matches so
+ // criteria.matches remains inlinable for the simple case.
+ pattern := regexp.QuoteMeta(criteria.Name)
+ pattern = "^" + pattern + "$"
+ pattern = strings.ReplaceAll(pattern, `\?`, ".")
+ pattern = strings.ReplaceAll(pattern, `\*`, ".*")
+ re := regexp.MustCompile(pattern)
+ matchFunc = func(i Instrument) bool {
+ return re.MatchString(i.Name) &&
+ criteria.matchesDescription(i) &&
+ criteria.matchesKind(i) &&
+ criteria.matchesUnit(i) &&
+ criteria.matchesScope(i)
+ }
+ } else {
+ matchFunc = criteria.matches
+ }
+
+ var agg Aggregation
+ if mask.Aggregation != nil {
+ agg = mask.Aggregation.copy()
+ if err := agg.err(); err != nil {
+ global.Error(
+ err, "not using aggregation with view",
+ "criteria", criteria,
+ "mask", mask,
+ )
+ agg = nil
+ }
+ }
+
+ return func(i Instrument) (Stream, bool) {
+ if matchFunc(i) {
+ return Stream{
+ Name: nonZero(mask.Name, i.Name),
+ Description: nonZero(mask.Description, i.Description),
+ Unit: nonZero(mask.Unit, i.Unit),
+ Aggregation: agg,
+ AttributeFilter: mask.AttributeFilter,
+ ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector,
+ }, true
+ }
+ return Stream{}, false
+ }
+}
+
+// nonZero returns v if it is non-zero-valued, otherwise alt.
+func nonZero[T comparable](v, alt T) T {
+ var zero T
+ if v != zero {
+ return v
+ }
+ return alt
+}
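
A sketch of the wildcard rules NewView enforces, assuming the public API: renaming with a wildcard criteria is rejected (errMultiInst) because it would fold multiple instruments into one name, while a wildcard view that only rewrites other Stream fields is fine.

package main

import (
	"go.opentelemetry.io/otel/attribute"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Dropped: renaming with a wildcard would map multiple instruments to
	// one name, so NewView logs errMultiInst and returns an empty view.
	_ = sdkmetric.NewView(
		sdkmetric.Instrument{Name: "http.*"},
		sdkmetric.Stream{Name: "renamed"},
	)

	// Valid: a wildcard match that only filters attributes.
	keep := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "http.*"},
		sdkmetric.Stream{AttributeFilter: attribute.NewAllowKeysFilter("http.method")},
	)
	_ = sdkmetric.NewMeterProvider(sdkmetric.WithView(keep))
}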
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
new file mode 100644
index 000000000..4ad864d71
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
@@ -0,0 +1,3 @@
+# SDK Resource
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
new file mode 100644
index 000000000..c02aeefdd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+)
+
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
+
+// Detector detects OpenTelemetry resource information.
+type Detector interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Detect returns an initialized Resource based on gathered information.
+ // If the source information to construct a Resource contains invalid
+ // values, a Resource is returned with the valid parts of the source
+ // information used for initialization along with an appropriately
+ // wrapped ErrPartialResource error.
+ Detect(ctx context.Context) (*Resource, error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// Detect returns a new [Resource] merged from all the Resources each of the
+// detectors produces. Each of the detectors is called sequentially, in the
+// order they are passed, merging the produced resource into the previous.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
+ r := new(Resource)
+ return r, detect(ctx, r, detectors)
+}
+
+// detect runs all detectors using ctx and merges the result into res. This
+// assumes res is allocated and not nil; it will panic otherwise.
+//
+// If the detectors or merging resources produces any errors (e.g.
+// [ErrPartialResource], [ErrSchemaURLConflict]), a single error wrapping all
+// of these errors will be returned. Otherwise, nil is returned.
+func detect(ctx context.Context, res *Resource, detectors []Detector) error {
+ var (
+ r *Resource
+ err error
+ e error
+ )
+
+ for _, detector := range detectors {
+ if detector == nil {
+ continue
+ }
+ r, e = detector.Detect(ctx)
+ if e != nil {
+ err = errors.Join(err, e)
+ if !errors.Is(e, ErrPartialResource) {
+ continue
+ }
+ }
+ r, e = Merge(res, r)
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ *res = *r
+ }
+
+ if err != nil {
+ if errors.Is(err, ErrSchemaURLConflict) {
+ // If there has been a merge conflict, ensure the resource has no
+ // schema URL.
+ res.schemaURL = ""
+ }
+
+ err = fmt.Errorf("error detecting resource: %w", err)
+ }
+ return err
+}
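
A sketch of plugging a custom Detector into Detect; the buildInfo type and its attribute are illustrative assumptions:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// buildInfo is a hypothetical detector supplying build metadata.
type buildInfo struct{}

func (buildInfo) Detect(context.Context) (*resource.Resource, error) {
	return resource.NewSchemaless(attribute.String("build.commit", "abc123")), nil
}

func main() {
	// Detectors run sequentially; each result merges into the previous.
	res, err := resource.Detect(context.Background(), buildInfo{})
	fmt.Println(res, err)
}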
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
new file mode 100644
index 000000000..3f20eb7a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/google/uuid"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+)
+
+type (
+ // telemetrySDK is a Detector that provides information about
+ // the OpenTelemetry SDK used. This Detector is included as a
+ // builtin. If these resource attributes are not wanted, use
+ // resource.New() to explicitly disable them.
+ telemetrySDK struct{}
+
+	// host is a Detector that provides information about the host
+	// being run on. This Detector is included as a builtin. If
+	// these resource attributes are not wanted, use resource.New()
+	// to explicitly disable them.
+ host struct{}
+
+ stringDetector struct {
+ schemaURL string
+ K attribute.Key
+ F func() (string, error)
+ }
+
+ defaultServiceNameDetector struct{}
+
+ defaultServiceInstanceIDDetector struct{}
+)
+
+var (
+ _ Detector = telemetrySDK{}
+ _ Detector = host{}
+ _ Detector = stringDetector{}
+ _ Detector = defaultServiceNameDetector{}
+ _ Detector = defaultServiceInstanceIDDetector{}
+)
+
+// Detect returns a *Resource that describes the OpenTelemetry SDK used.
+func (telemetrySDK) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.TelemetrySDKName("opentelemetry"),
+ semconv.TelemetrySDKLanguageGo,
+ semconv.TelemetrySDKVersion(sdk.Version()),
+ ), nil
+}
+
+// Detect returns a *Resource that describes the host being run on.
+func (host) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
+}
+
+// StringDetector returns a Detector that will produce a *Resource
+// containing the string as a value corresponding to k. The resulting Resource
+// will have the specified schemaURL.
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
+ return stringDetector{schemaURL: schemaURL, K: k, F: f}
+}
+
+// Detect returns a *Resource that describes the string as a value
+// corresponding to attribute.Key as well as the specific schemaURL.
+func (sd stringDetector) Detect(context.Context) (*Resource, error) {
+ value, err := sd.F()
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", string(sd.K), err)
+ }
+ a := sd.K.String(value)
+ if !a.Valid() {
+ return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
+ }
+ return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
+}
+
+// Detect implements Detector.
+func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceNameKey,
+ func() (string, error) {
+ executable, err := os.Executable()
+ if err != nil {
+ return "unknown_service:go", nil
+ }
+ return "unknown_service:" + filepath.Base(executable), nil
+ },
+ ).Detect(ctx)
+}
+
+// Detect implements Detector.
+func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceInstanceIDKey,
+ func() (string, error) {
+ version4Uuid, err := uuid.NewRandom()
+ if err != nil {
+ return "", err
+ }
+
+ return version4Uuid.String(), nil
+ },
+ ).Detect(ctx)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
new file mode 100644
index 000000000..0d6e213d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// config contains configuration for Resource creation.
+type config struct {
+ // detectors that will be evaluated.
+ detectors []Detector
+ // SchemaURL to associate with the Resource.
+ schemaURL string
+}
+
+// Option is the interface that applies a configuration option.
+type Option interface {
+ // apply sets the Option value of a config.
+ apply(config) config
+}
+
+// WithAttributes adds attributes to the configured Resource.
+func WithAttributes(attributes ...attribute.KeyValue) Option {
+ return WithDetectors(detectAttributes{attributes})
+}
+
+type detectAttributes struct {
+ attributes []attribute.KeyValue
+}
+
+func (d detectAttributes) Detect(context.Context) (*Resource, error) {
+ return NewSchemaless(d.attributes...), nil
+}
+
+// WithDetectors adds detectors to be evaluated for the configured resource.
+func WithDetectors(detectors ...Detector) Option {
+ return detectorsOption{detectors: detectors}
+}
+
+type detectorsOption struct {
+ detectors []Detector
+}
+
+func (o detectorsOption) apply(cfg config) config {
+ cfg.detectors = append(cfg.detectors, o.detectors...)
+ return cfg
+}
+
+// WithFromEnv adds attributes from environment variables to the configured resource.
+func WithFromEnv() Option {
+ return WithDetectors(fromEnv{})
+}
+
+// WithHost adds attributes from the host to the configured resource.
+func WithHost() Option {
+ return WithDetectors(host{})
+}
+
+// WithHostID adds host ID information to the configured resource.
+func WithHostID() Option {
+ return WithDetectors(hostIDDetector{})
+}
+
+// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
+func WithTelemetrySDK() Option {
+ return WithDetectors(telemetrySDK{})
+}
+
+// WithSchemaURL sets the schema URL for the configured resource.
+func WithSchemaURL(schemaURL string) Option {
+ return schemaURLOption(schemaURL)
+}
+
+type schemaURLOption string
+
+func (o schemaURLOption) apply(cfg config) config {
+ cfg.schemaURL = string(o)
+ return cfg
+}
+
+// WithOS adds all the OS attributes to the configured Resource.
+// See individual WithOS* functions to configure specific attributes.
+func WithOS() Option {
+ return WithDetectors(
+ osTypeDetector{},
+ osDescriptionDetector{},
+ )
+}
+
+// WithOSType adds an attribute with the operating system type to the configured Resource.
+func WithOSType() Option {
+ return WithDetectors(osTypeDetector{})
+}
+
+// WithOSDescription adds an attribute with the operating system description to the
+// configured Resource. The formatted string is equivalent to the output of the
+// `uname -snrvm` command.
+func WithOSDescription() Option {
+ return WithDetectors(osDescriptionDetector{})
+}
+
+// WithProcess adds all the Process attributes to the configured Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+//
+// This option is equivalent to calling WithProcessPID,
+// WithProcessExecutableName, WithProcessExecutablePath,
+// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
+// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
+// option function for information about what resource attributes each
+// includes.
+func WithProcess() Option {
+ return WithDetectors(
+ processPIDDetector{},
+ processExecutableNameDetector{},
+ processExecutablePathDetector{},
+ processCommandArgsDetector{},
+ processOwnerDetector{},
+ processRuntimeNameDetector{},
+ processRuntimeVersionDetector{},
+ processRuntimeDescriptionDetector{},
+ )
+}
+
+// WithProcessPID adds an attribute with the process identifier (PID) to the
+// configured Resource.
+func WithProcessPID() Option {
+ return WithDetectors(processPIDDetector{})
+}
+
+// WithProcessExecutableName adds an attribute with the name of the process
+// executable to the configured Resource.
+func WithProcessExecutableName() Option {
+ return WithDetectors(processExecutableNameDetector{})
+}
+
+// WithProcessExecutablePath adds an attribute with the full path to the process
+// executable to the configured Resource.
+func WithProcessExecutablePath() Option {
+ return WithDetectors(processExecutablePathDetector{})
+}
+
+// WithProcessCommandArgs adds an attribute with all the command arguments (including
+// the command/executable itself) as received by the process to the configured
+// Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+func WithProcessCommandArgs() Option {
+ return WithDetectors(processCommandArgsDetector{})
+}
+
+// WithProcessOwner adds an attribute with the username of the user that owns the process
+// to the configured Resource.
+func WithProcessOwner() Option {
+ return WithDetectors(processOwnerDetector{})
+}
+
+// WithProcessRuntimeName adds an attribute with the name of the runtime of this
+// process to the configured Resource.
+func WithProcessRuntimeName() Option {
+ return WithDetectors(processRuntimeNameDetector{})
+}
+
+// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
+// this process to the configured Resource.
+func WithProcessRuntimeVersion() Option {
+ return WithDetectors(processRuntimeVersionDetector{})
+}
+
+// WithProcessRuntimeDescription adds an attribute with an additional description
+// about the runtime of the process to the configured Resource.
+func WithProcessRuntimeDescription() Option {
+ return WithDetectors(processRuntimeDescriptionDetector{})
+}
+
+// WithContainer adds all the Container attributes to the configured Resource.
+// See individual WithContainer* functions to configure specific attributes.
+func WithContainer() Option {
+ return WithDetectors(
+ cgroupContainerIDDetector{},
+ )
+}
+
+// WithContainerID adds an attribute with the id of the container to the configured Resource.
+// Note: WithContainerID will not extract the correct container ID in an ECS environment.
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
+func WithContainerID() Option {
+ return WithDetectors(cgroupContainerIDDetector{})
+}
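
A sketch composing several of these options with resource.New, assuming the public API; the service.name value is illustrative:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // environment variable detector
		resource.WithTelemetrySDK(), // SDK name/language/version
		resource.WithHost(),         // host.name
		resource.WithAttributes(attribute.String("service.name", "checkout")),
	)
	fmt.Println(res, err)
}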
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
new file mode 100644
index 000000000..bbe142d20
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "io"
+ "os"
+ "regexp"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+)
+
+type containerIDProvider func() (string, error)
+
+var (
+ containerID containerIDProvider = getContainerIDFromCGroup
+ cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
+)
+
+type cgroupContainerIDDetector struct{}
+
+const cgroupPath = "/proc/self/cgroup"
+
+// Detect returns a *Resource that describes the id of the container.
+// If no container id is found, an empty resource will be returned.
+func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) {
+ containerID, err := containerID()
+ if err != nil {
+ return nil, err
+ }
+
+ if containerID == "" {
+ return Empty(), nil
+ }
+ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+ defaultOSStat = os.Stat
+ osStat = defaultOSStat
+
+ defaultOSOpen = func(name string) (io.ReadCloser, error) {
+ return os.Open(name)
+ }
+ osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the id of the container from the cgroup file.
+// If no container id is found, an empty string will be returned.
+func getContainerIDFromCGroup() (string, error) {
+ if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+ // File does not exist, skip
+ return "", nil
+ }
+
+ file, err := osOpen(cgroupPath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the id of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if id := getContainerIDFromLine(line); id != "" {
+ return id
+ }
+ }
+ return ""
+}
+
+// getContainerIDFromLine returns the id of the container from one string line.
+func getContainerIDFromLine(line string) string {
+ matches := cgroupContainerIDRe.FindStringSubmatch(line)
+ if len(matches) <= 1 {
+ return ""
+ }
+ return matches[1]
+}
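
To make the extraction concrete, a standalone sketch applying the same pattern as cgroupContainerIDRe to a sample docker-style cgroup line (the line itself is fabricated for illustration):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the detector; the capture group is the hex id.
	re := regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
	line := "12:cpuset:/docker/0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	if m := re.FindStringSubmatch(line); len(m) > 1 {
		fmt.Println(m[1]) // the 64-hex-char container id
	}
}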
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 000000000..64939a271
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `key1=value1,key2=value2,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change.
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 000000000..4a1b017ee
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+)
+
+const (
+ // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
+
+ // svcNameKey is the environment variable name that Service Name information will be read from.
+ svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+
+// fromEnv is a Detector that collects resources from the environment.
+// This Detector is included as a builtin.
+type fromEnv struct{}
+
+// Compile time assertion that fromEnv implements the Detector interface.
+var _ Detector = fromEnv{}
+
+// Detect collects resources from environment.
+func (fromEnv) Detect(context.Context) (*Resource, error) {
+ attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
+ svcName := strings.TrimSpace(os.Getenv(svcNameKey))
+
+ if attrs == "" && svcName == "" {
+ return Empty(), nil
+ }
+
+ var res *Resource
+
+ if svcName != "" {
+ res = NewSchemaless(semconv.ServiceName(svcName))
+ }
+
+ r2, err := constructOTResources(attrs)
+
+ // Ensure that the resource with the service name from OTEL_SERVICE_NAME
+ // takes precedence, if it was defined.
+ res, err2 := Merge(r2, res)
+
+ if err == nil {
+ err = err2
+ } else if err2 != nil {
+ err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+ }
+
+ return res, err
+}
+
+func constructOTResources(s string) (*Resource, error) {
+ if s == "" {
+ return Empty(), nil
+ }
+ pairs := strings.Split(s, ",")
+ var attrs []attribute.KeyValue
+ var invalid []string
+ for _, p := range pairs {
+ k, v, found := strings.Cut(p, "=")
+ if !found {
+ invalid = append(invalid, p)
+ continue
+ }
+ key := strings.TrimSpace(k)
+ val, err := url.PathUnescape(strings.TrimSpace(v))
+ if err != nil {
+ // Retain original value if decoding fails, otherwise it will be
+ // an empty string.
+ val = v
+ otel.Handle(err)
+ }
+ attrs = append(attrs, attribute.String(key, val))
+ }
+ var err error
+ if len(invalid) > 0 {
+ err = fmt.Errorf("%w: %v", errMissingValue, invalid)
+ }
+ return NewSchemaless(attrs...), err
+}
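
A sketch of the precedence described above, assuming the public API: OTEL_SERVICE_NAME overrides any service.name parsed from OTEL_RESOURCE_ATTRIBUTES (values illustrative):

package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	_ = os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "deployment.environment=prod,region=us-east-1")
	// OTEL_SERVICE_NAME takes precedence over any service.name in the list.
	_ = os.Setenv("OTEL_SERVICE_NAME", "checkout")

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	fmt.Println(res, err)
}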
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
new file mode 100644
index 000000000..5fed33d4f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -0,0 +1,109 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+)
+
+type hostIDProvider func() (string, error)
+
+var defaultHostIDProvider hostIDProvider = platformHostIDReader.read
+
+var hostID = defaultHostIDProvider
+
+type hostIDReader interface {
+ read() (string, error)
+}
+
+type fileReader func(string) (string, error)
+
+type commandExecutor func(string, ...string) (string, error)
+
+// hostIDReaderBSD implements hostIDReader.
+type hostIDReaderBSD struct {
+ execCommand commandExecutor
+ readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/hostid. If not found, it
+// executes `kenv -q smbios.system.uuid`. If neither location yields an ID,
+// an error is returned.
+func (r *hostIDReaderBSD) read() (string, error) {
+ if result, err := r.readFile("/etc/hostid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/hostid or kenv")
+}
+
+// hostIDReaderDarwin implements hostIDReader.
+type hostIDReaderDarwin struct {
+ execCommand commandExecutor
+}
+
+// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses the host
+// id from the IOPlatformUUID line. If the command fails or the UUID cannot be
+// parsed, an error is returned.
+func (r *hostIDReaderDarwin) read() (string, error) {
+ result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
+ if err != nil {
+ return "", err
+ }
+
+ lines := strings.Split(result, "\n")
+ for _, line := range lines {
+ if strings.Contains(line, "IOPlatformUUID") {
+ parts := strings.Split(line, " = ")
+ if len(parts) == 2 {
+ return strings.Trim(parts[1], "\""), nil
+ }
+ break
+ }
+ }
+
+ return "", errors.New("could not parse IOPlatformUUID")
+}
+
+type hostIDReaderLinux struct {
+ readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/machine-id followed by
+// /var/lib/dbus/machine-id. If neither location yields an ID, an error is
+// returned.
+func (r *hostIDReaderLinux) read() (string, error) {
+ if result, err := r.readFile("/etc/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id")
+}
+
+type hostIDDetector struct{}
+
+// Detect returns a *Resource containing the platform specific host id.
+func (hostIDDetector) Detect(context.Context) (*Resource, error) {
+ hostID, err := hostID()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.HostID(hostID),
+ ), nil
+}
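`hostIDDetector` is unexported; assuming the package's public `WithHostID` option wires it in (as it does in current releases of this SDK), requesting the `host.id` attribute looks roughly like the sketch below. The output depends on which platform reader (BSD, Darwin, Linux, Windows, or the unsupported fallback) is compiled in.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(), resource.WithHostID())
	if err != nil {
		fmt.Println("host id detection failed:", err)
		return
	}
	fmt.Println(res) // host.id=<machine id>, where the platform supports it
}
```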
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
new file mode 100644
index 000000000..cc8b8938e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build dragonfly || freebsd || netbsd || openbsd || solaris
+// +build dragonfly freebsd netbsd openbsd solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderBSD{
+ execCommand: execCommand,
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
new file mode 100644
index 000000000..b09fde3b7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderDarwin{
+ execCommand: execCommand,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
new file mode 100644
index 000000000..d9e5d1a8f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os/exec"
+
+func execCommand(name string, arg ...string) (string, error) {
+ cmd := exec.Command(name, arg...)
+ b, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
new file mode 100644
index 000000000..f84f17324
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux
+// +build linux
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderLinux{
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
new file mode 100644
index 000000000..6354b3560
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os"
+
+func readFile(filename string) (string, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
new file mode 100644
index 000000000..df12c44c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// hostIDReaderUnsupported is a placeholder implementation for operating systems
+// for which this project currently doesn't support host.id
+// attribute detection. See the build tags declaration at the top of this
+// file for a list of unsupported OSes.
+type hostIDReaderUnsupported struct{}
+
+func (*hostIDReaderUnsupported) read() (string, error) {
+ return "", nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
new file mode 100644
index 000000000..3677c83d7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build windows
+// +build windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "golang.org/x/sys/windows/registry"
+)
+
+// hostIDReaderWindows implements hostIDReader.
+type hostIDReaderWindows struct{}
+
+// read reads MachineGuid from the Windows registry key:
+// SOFTWARE\Microsoft\Cryptography.
+func (*hostIDReaderWindows) read() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
+ registry.QUERY_VALUE|registry.WOW64_64KEY,
+ )
+ if err != nil {
+ return "", err
+ }
+ defer k.Close()
+
+ guid, _, err := k.GetStringValue("MachineGuid")
+ if err != nil {
+ return "", err
+ }
+
+ return guid, nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderWindows{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
new file mode 100644
index 000000000..51da76e80
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+)
+
+type osDescriptionProvider func() (string, error)
+
+var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
+
+var osDescription = defaultOSDescriptionProvider
+
+func setDefaultOSDescriptionProvider() {
+ setOSDescriptionProvider(defaultOSDescriptionProvider)
+}
+
+func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
+ osDescription = osDescriptionProvider
+}
+
+type (
+ osTypeDetector struct{}
+ osDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the operating system type the
+// service is running on.
+func (osTypeDetector) Detect(context.Context) (*Resource, error) {
+ osType := runtimeOS()
+
+ osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ osTypeAttribute,
+ ), nil
+}
+
+// Detect returns a *Resource that describes the operating system the
+// service is running on.
+func (osDescriptionDetector) Detect(context.Context) (*Resource, error) {
+ description, err := osDescription()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.OSDescription(description),
+ ), nil
+}
+
+// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
+// into an OS type attribute with the corresponding value defined by the semantic
+// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
+// and used as the value for the returned OS type attribute.
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+ // the elements in this map are the intersection between
+ // available GOOS values and defined semconv OS types
+ osTypeAttributeMap := map[string]attribute.KeyValue{
+ "aix": semconv.OSTypeAIX,
+ "darwin": semconv.OSTypeDarwin,
+ "dragonfly": semconv.OSTypeDragonflyBSD,
+ "freebsd": semconv.OSTypeFreeBSD,
+ "linux": semconv.OSTypeLinux,
+ "netbsd": semconv.OSTypeNetBSD,
+ "openbsd": semconv.OSTypeOpenBSD,
+ "solaris": semconv.OSTypeSolaris,
+ "windows": semconv.OSTypeWindows,
+ "zos": semconv.OSTypeZOS,
+ }
+
+ var osTypeAttribute attribute.KeyValue
+
+ if attr, ok := osTypeAttributeMap[osType]; ok {
+ osTypeAttribute = attr
+ } else {
+ osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+ }
+
+ return osTypeAttribute
+}
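Assuming the public `WithOSType` and `WithOSDescription` options map onto the two detectors above, a short sketch of collecting both OS attributes:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(
		context.Background(),
		resource.WithOSType(),        // osTypeDetector: GOOS mapped to semconv
		resource.WithOSDescription(), // osDescriptionDetector: platform string
	)
	if err != nil {
		fmt.Println("os detection:", err)
	}
	fmt.Println(res) // e.g. os.description=...,os.type=linux
}
```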
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 000000000..3d703c5d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+)
+
+type plist struct {
+ XMLName xml.Name `xml:"plist"`
+ Dict dict `xml:"dict"`
+}
+
+type dict struct {
+ Key []string `xml:"key"`
+ String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+ file, err := getPlistFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values, err := parsePlistFile(file)
+ if err != nil {
+ return ""
+ }
+
+ return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{
+ "/System/Library/CoreServices/SystemVersion.plist",
+ "/System/Library/CoreServices/ServerVersion.plist",
+ })
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and
+// returns a map with the key-values for each pair of correlated <key> and
+// <string> elements contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+ var v plist
+
+ err := xml.NewDecoder(file).Decode(&v)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(v.Dict.Key) != len(v.Dict.String) {
+ return nil, errors.New("the number of <key> and <string> elements doesn't match")
+ }
+
+ properties := make(map[string]string, len(v.Dict.Key))
+ for i, key := range v.Dict.Key {
+ properties[key] = v.Dict.String[i]
+ }
+
+ return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+ productName := properties["ProductName"]
+ productVersion := properties["ProductVersion"]
+ productBuildVersion := properties["ProductBuildVersion"]
+
+ if productName == "" || productVersion == "" || productBuildVersion == "" {
+ return ""
+ }
+
+ return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
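A self-contained sketch of the positional `<key>`/`<string>` pairing that `parsePlistFile` relies on, using an inline plist document as an assumed stand-in for SystemVersion.plist:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type versionPlist struct {
	Dict struct {
		Key    []string `xml:"key"`
		String []string `xml:"string"`
	} `xml:"dict"`
}

func main() {
	sample := `<plist><dict>
	<key>ProductName</key><string>macOS</string>
	<key>ProductVersion</key><string>14.4</string>
	<key>ProductBuildVersion</key><string>23E214</string>
</dict></plist>`

	var v versionPlist
	if err := xml.NewDecoder(strings.NewReader(sample)).Decode(&v); err != nil {
		panic(err)
	}

	props := make(map[string]string, len(v.Dict.Key))
	for i, k := range v.Dict.Key {
		props[k] = v.Dict.String[i] // i-th <key> pairs with i-th <string>
	}
	fmt.Printf("%s %s (%s)\n",
		props["ProductName"], props["ProductVersion"], props["ProductBuildVersion"])
	// Output: macOS 14.4 (23E214)
}
```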
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 000000000..7252af79f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+ file, err := getOSReleaseFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values := parseOSReleaseFile(file)
+
+ return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release
+// file and returns a map with the key-values contained in it. Empty lines or
+// lines starting with a '#' character are ignored, as well as lines with a
+// missing key=value separator. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+ values := make(map[string]string)
+ scanner := bufio.NewScanner(file)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if skip(line) {
+ continue
+ }
+
+ key, value, ok := parse(line)
+ if ok {
+ values[key] = value
+ }
+ }
+
+ return values
+}
+
+// skip reports whether the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+ line = strings.TrimSpace(line)
+
+ return line == "" || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+ k, v, found := strings.Cut(line, "=")
+
+ if !found || k == "" {
+ return "", "", false
+ }
+
+ key := strings.TrimSpace(k)
+ value := unescape(unquote(strings.TrimSpace(v)))
+
+ return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
+func unquote(s string) string {
+ if len(s) < 2 {
+ return s
+ }
+
+ if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+ return s[1 : len(s)-1]
+ }
+
+ return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+ return strings.NewReplacer(
+ `\$`, `$`,
+ `\"`, `"`,
+ `\'`, `'`,
+ `\\`, `\`,
+ "\\`", "`",
+ ).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and using `PRETTY_NAME` alone if some of the previous are not present. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
+func buildOSRelease(values map[string]string) string {
+ var osRelease string
+
+ name := values["NAME"]
+ version := values["VERSION"]
+
+ if version == "" {
+ version = values["VERSION_ID"]
+ }
+
+ if name != "" && version != "" {
+ osRelease = fmt.Sprintf("%s %s", name, version)
+ } else {
+ osRelease = values["PRETTY_NAME"]
+ }
+
+ return osRelease
+}
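The following standalone sketch mirrors (but is not) the vendored line rules above: skip blanks and `#` comments, split on the first `=`, trim, and unquote. The sample file contents are illustrative.

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sample := `# illustrative /etc/os-release excerpt
NAME="Ubuntu"
VERSION="22.04 LTS"
PRETTY_NAME="Ubuntu 22.04 LTS"
`
	values := map[string]string{}
	sc := bufio.NewScanner(strings.NewReader(sample))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip blanks and comments
		}
		k, v, ok := strings.Cut(line, "=")
		if !ok || k == "" {
			continue // no key=value separator
		}
		v = strings.TrimSpace(v)
		if len(v) >= 2 && (v[0] == '"' || v[0] == '\'') && v[0] == v[len(v)-1] {
			v = v[1 : len(v)-1] // unquote
		}
		values[strings.TrimSpace(k)] = v
	}
	// NAME + VERSION is preferred over PRETTY_NAME, as explained above.
	fmt.Printf("%s %s\n", values["NAME"], values["VERSION"]) // Ubuntu 22.04 LTS
}
```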
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 000000000..a6ff26a4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+ setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+ currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+ uname, err := uname()
+ if err != nil {
+ return "", err
+ }
+
+ osRelease := osRelease()
+ if osRelease != "" {
+ return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+ }
+
+ return uname, nil
+}
+
+// uname issues a uname(2) system call (or equivalent on systems which don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` commandline program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+ var utsName unix.Utsname
+
+ err := currentUnameProvider(&utsName)
+ if err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("%s %s %s %s %s",
+ unix.ByteSliceToString(utsName.Sysname[:]),
+ unix.ByteSliceToString(utsName.Nodename[:]),
+ unix.ByteSliceToString(utsName.Release[:]),
+ unix.ByteSliceToString(utsName.Version[:]),
+ unix.ByteSliceToString(utsName.Machine[:]),
+ ), nil
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) {
+ for _, c := range candidates {
+ file, err := os.Open(c)
+ if err == nil {
+ return file, nil
+ }
+ }
+
+ return nil, fmt.Errorf("no candidate file available: %v", candidates)
+}
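A direct sketch of the same uname(2) formatting, calling `golang.org/x/sys/unix` by hand; field widths in `unix.Utsname` differ per platform, which `ByteSliceToString` absorbs. Constrained here to linux and darwin for simplicity.

```go
//go:build linux || darwin

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var uts unix.Utsname
	if err := unix.Uname(&uts); err != nil {
		fmt.Println("uname failed:", err)
		return
	}
	// Same field order as `uname -snrvm`.
	fmt.Println(
		unix.ByteSliceToString(uts.Sysname[:]),
		unix.ByteSliceToString(uts.Nodename[:]),
		unix.ByteSliceToString(uts.Release[:]),
		unix.ByteSliceToString(uts.Version[:]),
		unix.ByteSliceToString(uts.Machine[:]),
	)
}
```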
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
new file mode 100644
index 000000000..a77742b07
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// platformOSDescription is a placeholder implementation for OSes
+// for which this project currently doesn't support os.description
+// attribute detection. See the build tags declaration at the top of this
+// file for a list of unsupported OSes.
+func platformOSDescription() (string, error) {
+ return "", nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
new file mode 100644
index 000000000..a6a5a53c0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "strconv"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// platformOSDescription returns a human readable OS version information string.
+// It does so by querying registry values under the
+// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
+// resembles the one displayed by the Version Reporter Applet (winver.exe).
+func platformOSDescription() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+ if err != nil {
+ return "", err
+ }
+
+ defer k.Close()
+
+ var (
+ productName = readProductName(k)
+ displayVersion = readDisplayVersion(k)
+ releaseID = readReleaseID(k)
+ currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
+ currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
+ currentBuildNumber = readCurrentBuildNumber(k)
+ ubr = readUBR(k)
+ )
+
+ if displayVersion != "" {
+ displayVersion += " "
+ }
+
+ return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
+ productName,
+ displayVersion,
+ releaseID,
+ currentMajorVersionNumber,
+ currentMinorVersionNumber,
+ currentBuildNumber,
+ ubr,
+ ), nil
+}
+
+func getStringValue(name string, k registry.Key) string {
+ value, _, _ := k.GetStringValue(name)
+
+ return value
+}
+
+func getIntegerValue(name string, k registry.Key) uint64 {
+ value, _, _ := k.GetIntegerValue(name)
+
+ return value
+}
+
+func readProductName(k registry.Key) string {
+ return getStringValue("ProductName", k)
+}
+
+func readDisplayVersion(k registry.Key) string {
+ return getStringValue("DisplayVersion", k)
+}
+
+func readReleaseID(k registry.Key) string {
+ return getStringValue("ReleaseID", k)
+}
+
+func readCurrentMajorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
+}
+
+func readCurrentMinorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
+}
+
+func readCurrentBuildNumber(k registry.Key) string {
+ return getStringValue("CurrentBuildNumber", k)
+}
+
+func readUBR(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("UBR", k), 10)
+}
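For comparison, a hand-rolled sketch reading a few of the same `CurrentVersion` values; the key path and value names match the code above, while the output format is simplified. Windows-only.

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(
		registry.LOCAL_MACHINE,
		`SOFTWARE\Microsoft\Windows NT\CurrentVersion`,
		registry.QUERY_VALUE,
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer k.Close()

	name, _, _ := k.GetStringValue("ProductName") // e.g. "Windows 10 Pro"
	build, _, _ := k.GetStringValue("CurrentBuildNumber")
	ubr, _, _ := k.GetIntegerValue("UBR") // revision number
	fmt.Printf("%s build %s.%d\n", name, build, ubr)
}
```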
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
new file mode 100644
index 000000000..138e57721
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -0,0 +1,173 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+)
+
+type (
+ pidProvider func() int
+ executablePathProvider func() (string, error)
+ commandArgsProvider func() []string
+ ownerProvider func() (*user.User, error)
+ runtimeNameProvider func() string
+ runtimeVersionProvider func() string
+ runtimeOSProvider func() string
+ runtimeArchProvider func() string
+)
+
+var (
+ defaultPidProvider pidProvider = os.Getpid
+ defaultExecutablePathProvider executablePathProvider = os.Executable
+ defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
+ defaultOwnerProvider ownerProvider = user.Current
+ defaultRuntimeNameProvider runtimeNameProvider = func() string {
+ if runtime.Compiler == "gc" {
+ return "go"
+ }
+ return runtime.Compiler
+ }
+ defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
+ defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
+ defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }
+)
+
+var (
+ pid = defaultPidProvider
+ executablePath = defaultExecutablePathProvider
+ commandArgs = defaultCommandArgsProvider
+ owner = defaultOwnerProvider
+ runtimeName = defaultRuntimeNameProvider
+ runtimeVersion = defaultRuntimeVersionProvider
+ runtimeOS = defaultRuntimeOSProvider
+ runtimeArch = defaultRuntimeArchProvider
+)
+
+func setDefaultOSProviders() {
+ setOSProviders(
+ defaultPidProvider,
+ defaultExecutablePathProvider,
+ defaultCommandArgsProvider,
+ )
+}
+
+func setOSProviders(
+ pidProvider pidProvider,
+ executablePathProvider executablePathProvider,
+ commandArgsProvider commandArgsProvider,
+) {
+ pid = pidProvider
+ executablePath = executablePathProvider
+ commandArgs = commandArgsProvider
+}
+
+func setDefaultRuntimeProviders() {
+ setRuntimeProviders(
+ defaultRuntimeNameProvider,
+ defaultRuntimeVersionProvider,
+ defaultRuntimeOSProvider,
+ defaultRuntimeArchProvider,
+ )
+}
+
+func setRuntimeProviders(
+ runtimeNameProvider runtimeNameProvider,
+ runtimeVersionProvider runtimeVersionProvider,
+ runtimeOSProvider runtimeOSProvider,
+ runtimeArchProvider runtimeArchProvider,
+) {
+ runtimeName = runtimeNameProvider
+ runtimeVersion = runtimeVersionProvider
+ runtimeOS = runtimeOSProvider
+ runtimeArch = runtimeArchProvider
+}
+
+func setDefaultUserProviders() {
+ setUserProviders(defaultOwnerProvider)
+}
+
+func setUserProviders(ownerProvider ownerProvider) {
+ owner = ownerProvider
+}
+
+type (
+ processPIDDetector struct{}
+ processExecutableNameDetector struct{}
+ processExecutablePathDetector struct{}
+ processCommandArgsDetector struct{}
+ processOwnerDetector struct{}
+ processRuntimeNameDetector struct{}
+ processRuntimeVersionDetector struct{}
+ processRuntimeDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the process identifier (PID) of the
+// executing process.
+func (processPIDDetector) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
+}
+
+// Detect returns a *Resource that describes the name of the process executable.
+func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) {
+ executableName := filepath.Base(commandArgs()[0])
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
+}
+
+// Detect returns a *Resource that describes the full path of the process executable.
+func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) {
+ executablePath, err := executablePath()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
+}
+
+// Detect returns a *Resource that describes all the command arguments as received
+// by the process.
+func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
+}
+
+// Detect returns a *Resource that describes the username of the user that owns the
+// process.
+func (processOwnerDetector) Detect(context.Context) (*Resource, error) {
+ owner, err := owner()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
+}
+
+// Detect returns a *Resource that describes the name of the compiler used to compile
+// this process image.
+func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
+}
+
+// Detect returns a *Resource that describes the version of the runtime of this process.
+func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
+}
+
+// Detect returns a *Resource that describes the runtime of this process.
+func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) {
+ runtimeDescription := fmt.Sprintf(
+ "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ProcessRuntimeDescription(runtimeDescription),
+ ), nil
+}
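Assuming the public `WithProcessPID` and `WithProcessRuntimeDescription` options enable the corresponding detectors above, a short sketch:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(
		context.Background(),
		resource.WithProcessPID(),
		resource.WithProcessRuntimeDescription(),
	)
	if err != nil {
		fmt.Println("process detection:", err)
	}
	fmt.Println(res)
	// e.g. process.pid=4242,process.runtime.description=go version go1.22.0 linux/amd64
}
```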
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
new file mode 100644
index 000000000..28e1e4f7e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -0,0 +1,309 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
+)
+
+// Resource describes an entity about which identifying information
+// and metadata is exposed. Resource is an immutable object,
+// equivalent to a map from key to unique value.
+//
+// Resources should be passed and stored as pointers
+// (`*resource.Resource`). The `nil` value is equivalent to an empty
+// Resource.
+//
+// Note that the Go == operator compares not just the resource attributes but
+// also all other internals of the Resource type. Therefore, Resource values
+// should not be used as map or database keys. In general, the [Resource.Equal]
+// method should be used instead of direct comparison with ==, since that
+// method ensures the correct comparison of resource attributes, and the
+// [attribute.Distinct] returned from [Resource.Equivalent] should be used for
+// map and database keys instead.
+type Resource struct {
+ attrs attribute.Set
+ schemaURL string
+}
+
+// Compile-time check that the Resource remains comparable.
+var _ map[Resource]struct{} = nil
+
+var (
+ defaultResource *Resource
+ defaultResourceOnce sync.Once
+)
+
+// ErrSchemaURLConflict is an error returned when two Resources are merged
+// together that contain different, non-empty, schema URLs.
+var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
+
+// New returns a [Resource] built using opts.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if options that provide a [Detector] are used and that
+// error is returned from one or more of the Detectors. It may also return a
+// merge-conflict Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from the opts results in a
+// schema URL conflict (see [Resource.Merge] for more information). It is up to
+// the caller to determine if this returned Resource should be used or not
+// based on these errors.
+func New(ctx context.Context, opts ...Option) (*Resource, error) {
+ cfg := config{}
+ for _, opt := range opts {
+ cfg = opt.apply(cfg)
+ }
+
+ r := &Resource{schemaURL: cfg.schemaURL}
+ return r, detect(ctx, r, cfg.detectors)
+}
+
+// NewWithAttributes creates a resource from attrs and associates the resource with a
+// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
+// contains any invalid items those items will be dropped. The attrs are assumed to be
+// in a schema identified by schemaURL.
+func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
+ resource := NewSchemaless(attrs...)
+ resource.schemaURL = schemaURL
+ return resource
+}
+
+// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
+// the last value will be used. If attrs contains any invalid items those items will
+// be dropped. The resource will not be associated with a schema URL. If the schema
+// of the attrs is known use NewWithAttributes instead.
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
+ if len(attrs) == 0 {
+ return &Resource{}
+ }
+
+ // Ensure attributes comply with the specification:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute
+ s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
+ return kv.Valid()
+ })
+
+ // If attrs only contains invalid entries do not allocate a new resource.
+ if s.Len() == 0 {
+ return &Resource{}
+ }
+
+ return &Resource{attrs: s} //nolint
+}
+
+// String implements the Stringer interface and provides a
+// human-readable form of the resource.
+//
+// Avoid using this representation as the key in a map of resources,
+// use Equivalent() as the key instead.
+func (r *Resource) String() string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(attribute.DefaultEncoder())
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
+func (r *Resource) MarshalLog() any {
+ return struct {
+ Attributes attribute.Set
+ SchemaURL string
+ }{
+ Attributes: r.attrs,
+ SchemaURL: r.schemaURL,
+ }
+}
+
+// Attributes returns a copy of attributes from the resource in a sorted order.
+// To avoid allocating a new slice, use an iterator.
+func (r *Resource) Attributes() []attribute.KeyValue {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.ToSlice()
+}
+
+// SchemaURL returns the schema URL associated with Resource r.
+func (r *Resource) SchemaURL() string {
+ if r == nil {
+ return ""
+ }
+ return r.schemaURL
+}
+
+// Iter returns an iterator of the Resource attributes.
+// This is ideal to use if you do not want a copy of the attributes.
+func (r *Resource) Iter() attribute.Iterator {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.Iter()
+}
+
+// Equal reports whether r and o represent the same resource. Two resources can
+// be equal even if they have different schema URLs.
+//
+// See the documentation on the [Resource] type for the pitfalls of using ==
+// with Resource values; most code should use Equal instead.
+func (r *Resource) Equal(o *Resource) bool {
+ if r == nil {
+ r = Empty()
+ }
+ if o == nil {
+ o = Empty()
+ }
+ return r.Equivalent() == o.Equivalent()
+}
+
+// Merge creates a new [Resource] by merging a and b.
+//
+// If there are common keys between a and b, then the value from b will
+// overwrite the value from a, even if b's value is empty.
+//
+// The SchemaURL of the resources will be merged according to the
+// [OpenTelemetry specification rules]:
+//
+// - If a's schema URL is empty then the returned Resource's schema URL will
+// be set to the schema URL of b,
+// - Else if b's schema URL is empty then the returned Resource's schema URL
+// will be set to the schema URL of a,
+// - Else if the schema URLs of a and b are the same then that will be the
+// schema URL of the returned Resource,
+// - Else this is a merging error. If the resources have different,
+// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
+// be returned with the merged Resource. The merged Resource will have an
+// empty schema URL. It may be the case that some unintended attributes
+// have been overwritten or old semantic conventions persisted in the
+// returned Resource. It is up to the caller to determine if this returned
+// Resource should be used or not.
+//
+// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
+func Merge(a, b *Resource) (*Resource, error) {
+ if a == nil && b == nil {
+ return Empty(), nil
+ }
+ if a == nil {
+ return b, nil
+ }
+ if b == nil {
+ return a, nil
+ }
+
+ // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
+ // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
+ mi := attribute.NewMergeIterator(b.Set(), a.Set())
+ combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for mi.Next() {
+ combine = append(combine, mi.Attribute())
+ }
+
+ switch {
+ case a.schemaURL == "":
+ return NewWithAttributes(b.schemaURL, combine...), nil
+ case b.schemaURL == "":
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ case a.schemaURL == b.schemaURL:
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ }
+ // Return the merged resource with an appropriate error. It is up to
+ // the user to decide if the returned resource can be used or not.
+ return NewSchemaless(combine...), fmt.Errorf(
+ "%w: %s and %s",
+ ErrSchemaURLConflict,
+ a.schemaURL,
+ b.schemaURL,
+ )
+}
+
+// Empty returns an instance of Resource with no attributes. It is
+// equivalent to a `nil` Resource.
+func Empty() *Resource {
+ return &Resource{}
+}
+
+// Default returns an instance of Resource with a default
+// "service.name" and OpenTelemetrySDK attributes.
+func Default() *Resource {
+ defaultResourceOnce.Do(func() {
+ var err error
+ defaultDetectors := []Detector{
+ defaultServiceNameDetector{},
+ fromEnv{},
+ telemetrySDK{},
+ }
+ if x.Resource.Enabled() {
+ defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...)
+ }
+ defaultResource, err = Detect(
+ context.Background(),
+ defaultDetectors...,
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+ // If Detect did not return a valid resource, fall back to an empty resource.
+ if defaultResource == nil {
+ defaultResource = &Resource{}
+ }
+ })
+ return defaultResource
+}
+
+// Environment returns an instance of Resource with attributes
+// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
+func Environment() *Resource {
+ detector := &fromEnv{}
+ resource, err := detector.Detect(context.Background())
+ if err != nil {
+ otel.Handle(err)
+ }
+ return resource
+}
+
+// Equivalent returns an object that can be compared for equality
+// between two resources. This value is suitable for use as a key in
+// a map.
+func (r *Resource) Equivalent() attribute.Distinct {
+ return r.Set().Equivalent()
+}
+
+// Set returns the equivalent *attribute.Set of this resource's attributes.
+func (r *Resource) Set() *attribute.Set {
+ if r == nil {
+ r = Empty()
+ }
+ return &r.attrs
+}
+
+// MarshalJSON encodes the resource attributes as a JSON list of { "Key":
+// "...", "Value": ... } pairs in order sorted by key.
+func (r *Resource) MarshalJSON() ([]byte, error) {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.MarshalJSON()
+}
+
+// Len returns the number of unique key-values in this Resource.
+func (r *Resource) Len() int {
+ if r == nil {
+ return 0
+ }
+ return r.attrs.Len()
+}
+
+// Encoded returns an encoded representation of the resource.
+func (r *Resource) Encoded(enc attribute.Encoder) string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(enc)
+}
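A sketch of the `Merge` conflict path documented above; the two schema URLs are illustrative. Note that `b` overwrites `a` on the common key, and the conflicting URLs yield a schemaless Resource plus an error wrapping `ErrSchemaURLConflict`:

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	a := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.20.0",
		attribute.String("service.name", "a"))
	b := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.21.0",
		attribute.String("service.name", "b"))

	merged, err := resource.Merge(a, b)
	fmt.Println(errors.Is(err, resource.ErrSchemaURLConflict)) // true
	fmt.Println(merged.SchemaURL() == "")                      // true: merged is schemaless
	fmt.Println(merged)                                        // service.name=b (b overwrites a)
}
```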
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 000000000..7f97cc31e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package sdk provides the OpenTelemetry default SDK for Go.
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
+func Version() string {
+ return "1.38.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
deleted file mode 100644
index 82e1f46b4..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.20.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
deleted file mode 100644
index 6685c392b..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
+++ /dev/null
@@ -1,1198 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Describes HTTP attributes.
-const (
- // HTTPMethodKey is the attribute Key conforming to the "http.method"
- // semantic conventions. It represents the HTTP request method.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
-
- // HTTPStatusCodeKey is the attribute Key conforming to the
- // "http.status_code" semantic conventions. It represents the [HTTP
- // response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If and only if one was
- // received/sent.)
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions. It represents the HTTP request method.
-func HTTPMethod(val string) attribute.KeyValue {
- return HTTPMethodKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions. It represents the [HTTP response
-// status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPStatusCode(val int) attribute.KeyValue {
- return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTP Server spans attributes
-const (
- // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
- // semantic conventions. It represents the URI scheme identifying the used
- // protocol.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route (path template in
- // the format used by the respective server framework). See note below
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if it's available)
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
- // if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions. It represents the URI scheme identifying the used
-// protocol.
-func HTTPScheme(val string) attribute.KeyValue {
- return HTTPSchemeKey.String(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route (path template in the
-// format used by the respective server framework). See note below
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It represents the name that identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'click', 'exception'
- EventNameKey = attribute.Key("event.name")
-
- // EventDomainKey is the attribute Key conforming to the "event.domain"
- // semantic conventions. It represents the domain identifies the business
- // context for the events.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: Events across different domains may have same `event.name`, yet be
- // unrelated events.
- EventDomainKey = attribute.Key("event.domain")
-)
-
-var (
- // Events from browser apps
- EventDomainBrowser = EventDomainKey.String("browser")
- // Events from mobile apps
- EventDomainDevice = EventDomainKey.String("device")
- // Events from Kubernetes
- EventDomainK8S = EventDomainKey.String("k8s")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the name that identifies the event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetTransportKey is the attribute Key conforming to the "net.transport"
- // semantic conventions. It represents the transport protocol used. See
- // note below.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
-
- // NetProtocolNameKey is the attribute Key conforming to the
- // "net.protocol.name" semantic conventions. It represents the application
- // layer protocol used. The value SHOULD be normalized to lowercase.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- NetProtocolNameKey = attribute.Key("net.protocol.name")
-
- // NetProtocolVersionKey is the attribute Key conforming to the
- // "net.protocol.version" semantic conventions. It represents the version
- // of the application layer protocol used. See note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3.1.1'
- // Note: `net.protocol.version` refers to the version of the protocol used
- // and might be different from the protocol client's version. If the HTTP
- // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
- // this attribute should be set to `1.1`.
- NetProtocolVersionKey = attribute.Key("net.protocol.version")
-
- // NetSockPeerNameKey is the attribute Key conforming to the
- // "net.sock.peer.name" semantic conventions. It represents the remote
- // socket peer name.
- //
- // Type: string
- // RequirementLevel: Recommended (If available and different from
- // `net.peer.name` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 'proxy.example.com'
- NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
- // NetSockPeerAddrKey is the attribute Key conforming to the
- // "net.sock.peer.addr" semantic conventions. It represents the remote
- // socket peer address: IPv4 or IPv6 for internet protocols, path for local
- // communication,
- // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '127.0.0.1', '/tmp/mysql.sock'
- NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
- // NetSockPeerPortKey is the attribute Key conforming to the
- // "net.sock.peer.port" semantic conventions. It represents the remote
- // socket peer port.
- //
- // Type: int
- // RequirementLevel: Recommended (If defined for the address family and if
- // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 16456
- NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
- // NetSockFamilyKey is the attribute Key conforming to the
- // "net.sock.family" semantic conventions. It represents the protocol
- // [address
- // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
- // which is used for communication.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If different than `inet` and if
- // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
- // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
- // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
- // instrumentations that follow previous versions of this document.)
- // Stability: stable
- // Examples: 'inet6', 'bluetooth'
- NetSockFamilyKey = attribute.Key("net.sock.family")
-
- // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
- // semantic conventions. It represents the logical remote hostname, see
- // note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com'
- // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
- // extra DNS lookup.
- NetPeerNameKey = attribute.Key("net.peer.name")
-
- // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
- // semantic conventions. It represents the logical remote port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
-
- // NetHostNameKey is the attribute Key conforming to the "net.host.name"
- // semantic conventions. It represents the logical local hostname or
- // similar, see note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
-
- // NetHostPortKey is the attribute Key conforming to the "net.host.port"
- // semantic conventions. It represents the logical local port number,
- // preferably the one that the peer used to connect
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 8080
- NetHostPortKey = attribute.Key("net.host.port")
-
- // NetSockHostAddrKey is the attribute Key conforming to the
- // "net.sock.host.addr" semantic conventions. It represents the local
- // socket address. Useful in case of a multi-IP host.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '192.168.0.1'
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
- // NetSockHostPortKey is the attribute Key conforming to the
- // "net.sock.host.port" semantic conventions. It represents the local
- // socket port number.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If defined for the address
- // family and if different than `net.host.port` and if `net.sock.host.addr`
- // is set. In other cases, it is still recommended to set this.)
- // Stability: stable
- // Examples: 35555
- NetSockHostPortKey = attribute.Key("net.sock.host.port")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // IPv4 address
- NetSockFamilyInet = NetSockFamilyKey.String("inet")
- // IPv6 address
- NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
- // Unix domain socket path
- NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-// NetProtocolName returns an attribute KeyValue conforming to the
-// "net.protocol.name" semantic conventions. It represents the application
-// layer protocol used. The value SHOULD be normalized to lowercase.
-func NetProtocolName(val string) attribute.KeyValue {
- return NetProtocolNameKey.String(val)
-}
-
-// NetProtocolVersion returns an attribute KeyValue conforming to the
-// "net.protocol.version" semantic conventions. It represents the version of
-// the application layer protocol used. See note below.
-func NetProtocolVersion(val string) attribute.KeyValue {
- return NetProtocolVersionKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions. It represents the remote socket
-// peer name.
-func NetSockPeerName(val string) attribute.KeyValue {
- return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions. It represents the remote socket
-// peer address: IPv4 or IPv6 for internet protocols, path for local
-// communication,
-// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
-func NetSockPeerAddr(val string) attribute.KeyValue {
- return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions. It represents the remote socket
-// peer port.
-func NetSockPeerPort(val int) attribute.KeyValue {
- return NetSockPeerPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions. It represents the logical remote
-// hostname, see note below.
-func NetPeerName(val string) attribute.KeyValue {
- return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions. It represents the logical remote port
-// number
-func NetPeerPort(val int) attribute.KeyValue {
- return NetPeerPortKey.Int(val)
-}
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions. It represents the logical local
-// hostname or similar, see note below.
-func NetHostName(val string) attribute.KeyValue {
- return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions. It represents the logical local port
-// number, preferably the one that the peer used to connect
-func NetHostPort(val int) attribute.KeyValue {
- return NetHostPortKey.Int(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions. It represents the local socket
-// address. Useful in case of a multi-IP host.
-func NetSockHostAddr(val string) attribute.KeyValue {
- return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions. It represents the local socket
-// port number.
-func NetSockHostPort(val int) attribute.KeyValue {
- return NetSockHostPortKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetHostConnectionTypeKey is the attribute Key conforming to the
- // "net.host.connection.type" semantic conventions. It represents the
- // internet connection type currently being used by the host.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
-
- // NetHostConnectionSubtypeKey is the attribute Key conforming to the
- // "net.host.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
-
- // NetHostCarrierNameKey is the attribute Key conforming to the
- // "net.host.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
-
- // NetHostCarrierMccKey is the attribute Key conforming to the
- // "net.host.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
-
- // NetHostCarrierMncKey is the attribute Key conforming to the
- // "net.host.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
-
- // NetHostCarrierIccKey is the attribute Key conforming to the
- // "net.host.carrier.icc" semantic conventions. It represents the ISO
- // 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// NetHostCarrierName returns an attribute KeyValue conforming to the
-// "net.host.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetHostCarrierName(val string) attribute.KeyValue {
- return NetHostCarrierNameKey.String(val)
-}
-
-// NetHostCarrierMcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mcc" semantic conventions. It represents the mobile
-// carrier country code.
-func NetHostCarrierMcc(val string) attribute.KeyValue {
- return NetHostCarrierMccKey.String(val)
-}
-
-// NetHostCarrierMnc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mnc" semantic conventions. It represents the mobile
-// carrier network code.
-func NetHostCarrierMnc(val string) attribute.KeyValue {
- return NetHostCarrierMncKey.String(val)
-}
-
-// NetHostCarrierIcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetHostCarrierIcc(val string) attribute.KeyValue {
- return NetHostCarrierIccKey.String(val)
-}
-
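
For reference, a minimal sketch of how these net.* constructors end up on a span, assuming a tracer has been configured elsewhere; the helper name annotateDial and all values are illustrative:

package example

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// annotateDial starts a span and records the logical peer plus the host's
// connection and carrier details using the semconv constructors above.
func annotateDial(ctx context.Context) {
    _, span := otel.Tracer("example").Start(ctx, "dial")
    defer span.End()

    span.SetAttributes(
        semconv.NetPeerName("example.com"),       // logical remote hostname
        semconv.NetPeerPort(443),                 // logical remote port
        semconv.NetSockPeerAddr("93.184.216.34"), // resolved socket address
        semconv.NetHostConnectionTypeCell,        // enum value, used as-is
        semconv.NetHostConnectionSubtypeLte,
        semconv.NetHostCarrierIcc("DE"),
    )
}

Note that the enum members (connection type and subtype) are already attribute.KeyValue values, so they are passed directly rather than called.
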
-// Semantic conventions for HTTP client and server Spans.
-const (
- // HTTPRequestContentLengthKey is the attribute Key conforming to the
- // "http.request_content_length" semantic conventions. It represents the
- // size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
- // HTTPResponseContentLengthKey is the attribute Key conforming to the
- // "http.response_content_length" semantic conventions. It represents the
- // size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For responses using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-)
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions. It represents the size
-// of the request payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
- return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions. It represents the size
-// of the response payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For responses using transport encoding, this should be the compressed
-// size.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
- return HTTPResponseContentLengthKey.Int(val)
-}
-
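
These two helpers map naturally onto Go's net/http types, where ContentLength is -1 when the size is unknown. A sketch under that assumption (the helper name recordSizes is hypothetical):

package example

import (
    "context"
    "net/http"

    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    "go.opentelemetry.io/otel/trace"
)

// recordSizes copies Content-Length information from a request/response
// pair onto the span already stored in ctx. Negative lengths mean the
// size is unknown and are skipped.
func recordSizes(ctx context.Context, req *http.Request, resp *http.Response) {
    span := trace.SpanFromContext(ctx)
    if req.ContentLength >= 0 {
        span.SetAttributes(semconv.HTTPRequestContentLength(int(req.ContentLength)))
    }
    if resp.ContentLength >= 0 {
        span.SetAttributes(semconv.HTTPResponseContentLength(int(resp.ContentLength)))
    }
}
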
-// Semantic convention describing per-message attributes populated on messaging
-// spans or links.
-const (
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the [conversation ID](#conversations) identifying the conversation to
- // which the message belongs, represented as a string. Sometimes called
- // "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
- // the "messaging.message.payload_size_bytes" semantic conventions. It
- // represents the (uncompressed) size of the message payload in bytes. Also
- // use this attribute if it is unknown whether the compressed or
- // uncompressed payload size is reported.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
-
- // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
- // conforming to the "messaging.message.payload_compressed_size_bytes"
- // semantic conventions. It represents the compressed size of the message
- // payload in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
-)
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the [conversation ID](#conversations) identifying the
-// conversation to which the message belongs, represented as a string.
-// Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
-// to the "messaging.message.payload_size_bytes" semantic conventions. It
-// represents the (uncompressed) size of the message payload in bytes. Also use
-// this attribute if it is unknown whether the compressed or uncompressed
-// payload size is reported.
-func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadSizeBytesKey.Int(val)
-}
-
-// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
-// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
-// conventions. It represents the compressed size of the message payload in
-// bytes.
-func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
-}
-
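
A sketch of stamping these per-message attributes onto a producer span; the message ID and conversation ID below reuse the example values from the comments above, and payload is an arbitrary byte slice:

package example

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// publish records the per-message attributes on a send span. Only the
// uncompressed payload size is known here, so only payload_size_bytes
// is set.
func publish(ctx context.Context, payload []byte) {
    _, span := otel.Tracer("example").Start(ctx, "publish")
    defer span.End()

    span.SetAttributes(
        semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
        semconv.MessagingMessageConversationID("MyConversationID"),
        semconv.MessagingMessagePayloadSizeBytes(len(payload)),
    )
}
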
-// Semantic convention for attributes that describe messaging destination on
-// broker
-const (
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker does not have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-)
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// Semantic convention for attributes that describe messaging source on broker
-const (
- // MessagingSourceNameKey is the attribute Key conforming to the
- // "messaging.source.name" semantic conventions. It represents the message
- // source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Source name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker does not have such notion, the source name SHOULD uniquely
- // identify the broker.
- MessagingSourceNameKey = attribute.Key("messaging.source.name")
-
- // MessagingSourceTemplateKey is the attribute Key conforming to the
- // "messaging.source.template" semantic conventions. It represents the low
- // cardinality representation of the messaging source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Source names could be constructed from templates. An example would
- // be a source name involving a user name or product id. Although the
- // source name in this case is of high cardinality, the underlying template
- // is of low cardinality and can be effectively used for grouping and
- // aggregation.
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
-
- // MessagingSourceTemporaryKey is the attribute Key conforming to the
- // "messaging.source.temporary" semantic conventions. It represents a
- // boolean that is true if the message source is temporary and might not
- // exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
-
- // MessagingSourceAnonymousKey is the attribute Key conforming to the
- // "messaging.source.anonymous" semantic conventions. It represents a
- // boolean that is true if the message source is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
-)
-
-// MessagingSourceName returns an attribute KeyValue conforming to the
-// "messaging.source.name" semantic conventions. It represents the message
-// source name
-func MessagingSourceName(val string) attribute.KeyValue {
- return MessagingSourceNameKey.String(val)
-}
-
-// MessagingSourceTemplate returns an attribute KeyValue conforming to the
-// "messaging.source.template" semantic conventions. It represents the low
-// cardinality representation of the messaging source name
-func MessagingSourceTemplate(val string) attribute.KeyValue {
- return MessagingSourceTemplateKey.String(val)
-}
-
-// MessagingSourceTemporary returns an attribute KeyValue conforming to the
-// "messaging.source.temporary" semantic conventions. It represents a boolean
-// that is true if the message source is temporary and might not exist anymore
-// after messages are processed.
-func MessagingSourceTemporary(val bool) attribute.KeyValue {
- return MessagingSourceTemporaryKey.Bool(val)
-}
-
-// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
-// "messaging.source.anonymous" semantic conventions. It represents a boolean
-// that is true if the message source is anonymous (could be unnamed or have
-// auto-generated name).
-func MessagingSourceAnonymous(val bool) attribute.KeyValue {
- return MessagingSourceAnonymousKey.Bool(val)
-}
-
-// Attributes for RabbitMQ
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If not empty.)
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// Attributes for Apache Kafka
-const (
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka, which are used for grouping alike messages to ensure
- // they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myKey'
- // Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaClientIDKey is the attribute Key conforming to the
- // "messaging.kafka.client_id" semantic conventions. It represents the
- // client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
-
- // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
- // the "messaging.kafka.destination.partition" semantic conventions. It
- // represents the partition the message is sent to.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
- // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
- // "messaging.kafka.source.partition" semantic conventions. It represents
- // the partition the message is received from.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (If value is `true`. When
- // missing, the value is assumed to be `false`.)
- // Stability: stable
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka, which are used for grouping alike messages to ensure they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaClientID returns an attribute KeyValue conforming to the
-// "messaging.kafka.client_id" semantic conventions. It represents the client
-// ID for the Consumer or Producer that is handling the message.
-func MessagingKafkaClientID(val string) attribute.KeyValue {
- return MessagingKafkaClientIDKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
- return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
-// the "messaging.kafka.source.partition" semantic conventions. It represents
-// the partition the message is received from.
-func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
- return MessagingKafkaSourcePartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
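
A sketch of a consumer-side span carrying the Kafka attributes; record is a hypothetical stand-in for whatever message type the client library returns (e.g. a Sarama or kafka-go message):

package example

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// record is a placeholder for a consumed Kafka message.
type record struct {
    Key       []byte
    Partition int32
    Offset    int64
}

// consume annotates a processing span with the Kafka-specific attributes.
// The key is only set when non-nil, per the convention above.
func consume(ctx context.Context, msg record) {
    _, span := otel.Tracer("example").Start(ctx, "process")
    defer span.End()

    if msg.Key != nil {
        span.SetAttributes(semconv.MessagingKafkaMessageKey(string(msg.Key)))
    }
    span.SetAttributes(
        semconv.MessagingKafkaConsumerGroup("my-group"),
        semconv.MessagingKafkaSourcePartition(int(msg.Partition)),
        semconv.MessagingKafkaMessageOffset(int(msg.Offset)),
    )
}
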
-// Attributes for Apache RocketMQ
-const (
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqClientIDKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_id" semantic conventions. It represents the
- // unique identifier for each client.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myhost@8742@s8083jm'
- MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to the consumer.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delay time level is not specified.)
- // Stability: stable
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delivery timestamp is not specified.)
- // Stability: stable
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the message group, essential for FIFO messages. Messages that belong to the same
- // message group are always processed one by one within the same consumer
- // group.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
- // Stability: stable
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources, resources in different namespaces are
-// individual.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
-// "messaging.rocketmq.client_id" semantic conventions. It represents the
-// unique identifier for each client.
-func MessagingRocketmqClientID(val string) attribute.KeyValue {
- return MessagingRocketmqClientIDKey.String(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
- // message is expected to be delivered to the consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
- // the message group, essential for FIFO messages. Messages that belong to the same
-// message group are always processed one by one within the same consumer
-// group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of message, another way to mark message besides message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
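
The RocketMQ block mixes required attributes, conditionally required ones, and enum values. A sketch of a producer span that sets the required trio plus the FIFO-specific group and type (all values illustrative, taken from the examples above):

package example

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// sendFifo shows the required namespace/client_group/client_id attributes
// together with the FIFO-specific message group and message type enum.
func sendFifo(ctx context.Context) {
    _, span := otel.Tracer("example").Start(ctx, "send")
    defer span.End()

    span.SetAttributes(
        semconv.MessagingRocketmqNamespace("myNamespace"),
        semconv.MessagingRocketmqClientGroup("myProducerGroup"),
        semconv.MessagingRocketmqClientID("myhost@8742@s8083jm"),
        semconv.MessagingRocketmqMessageTypeFifo, // enum value
        semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
    )
}
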
-// Describes user-agent attributes.
-const (
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-)
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
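
On an HTTP server span, user_agent.original is normally populated straight from the incoming request. A sketch (the helper name is hypothetical):

package example

import (
    "net/http"

    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    "go.opentelemetry.io/otel/trace"
)

// tagUserAgent records the raw User-Agent header, if any, on the span
// stored in the request context.
func tagUserAgent(r *http.Request) {
    if ua := r.UserAgent(); ua != "" {
        trace.SpanFromContext(r.Context()).SetAttributes(semconv.UserAgentOriginal(ua))
    }
}
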
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
deleted file mode 100644
index 0d1f55a8f..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.20.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
deleted file mode 100644
index 637763932..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It represents the sHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// RPC received/sent message.
-const (
- // MessageTypeKey is the attribute Key conforming to the "message.type"
- // semantic conventions. It represents whether this is a received or
- // sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
-
- // MessageIDKey is the attribute Key conforming to the "message.id"
- // semantic conventions. It MUST be calculated as two
- // different counters starting from `1`, one for sent messages and one for
- // received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
-
- // MessageCompressedSizeKey is the attribute Key conforming to the
- // "message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // MessageUncompressedSizeKey is the attribute Key conforming to the
- // "message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
- // semantic conventions. It MUST be calculated as two different
- // counters starting from `1`, one for sent messages and one for received
- // messages.
-func MessageID(val int) attribute.KeyValue {
- return MessageIDKey.Int(val)
-}
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
- return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func MessageUncompressedSize(val int) attribute.KeyValue {
- return MessageUncompressedSizeKey.Int(val)
-}
-
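
These message.* attributes are meant to ride on span events rather than on the span itself; in the gRPC conventions the event is named "message". A sketch, with the per-direction counter handling simplified:

package example

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    "go.opentelemetry.io/otel/trace"
)

// sendMessage records the n-th sent message of the given size as a span event.
func sendMessage(ctx context.Context, n, size int) {
    _, span := otel.Tracer("example").Start(ctx, "rpc")
    defer span.End()

    span.AddEvent("message", trace.WithAttributes(
        semconv.MessageTypeSent,               // enum: "SENT"
        semconv.MessageID(n),                  // per-direction counter from 1
        semconv.MessageUncompressedSize(size),
    ))
}
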
-// The attributes used to report a single exception associated with a span.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It represents the sHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example above](#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
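
RecordError already emits an event whose name matches ExceptionEventName ("exception", exported from the sibling exception.go deleted below); the escaped flag rides along as an extra event attribute. A sketch:

package example

import (
    "context"
    "errors"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/codes"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
    "go.opentelemetry.io/otel/trace"
)

// doWork marks the error as escaping because the span ends while the
// error is still propagating to the caller.
func doWork(ctx context.Context) error {
    _, span := otel.Tracer("example").Start(ctx, "work")
    defer span.End()

    err := errors.New("boom")
    span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
    span.SetStatus(codes.Error, err.Error())
    return err
}
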
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
deleted file mode 100644
index f40c97825..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
deleted file mode 100644
index 9c1840631..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
deleted file mode 100644
index 3d44dae27..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
+++ /dev/null
@@ -1,2060 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
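
The browser.* attributes are resource-level and, per the notes above, sourced from the UA client hints API. In Go they would typically only appear when assembling a Resource by hand, e.g. in a collector for browser telemetry; a sketch using the example values given above:

package example

import (
    "go.opentelemetry.io/otel/sdk/resource"
    semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// browserResource assembles a Resource from the example values in the
// attribute notes above.
func browserResource() *resource.Resource {
    return resource.NewWithAttributes(
        semconv.SchemaURL, // schema URL exported by this semconv package
        semconv.BrowserBrands("Chromium 99", "Chrome 99"),
        semconv.BrowserPlatform("Windows"),
        semconv.BrowserMobile(false),
        semconv.BrowserLanguage("en-US"),
    )
}
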
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
-// on Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the
-// availability zone where the resource is running. Cloud regions often have
-// multiple, isolated locations known as zones to increase availability.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
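-
-// Editor's sketch (not part of the upstream generated file): how the cloud
-// helpers above are typically combined. Per the note on CloudPlatformKey, the
-// platform's prefix should match the provider. The function name and the
-// literal values here are illustrative assumptions, not upstream API.
-func exampleCloudAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  CloudProviderAWS,
-  CloudPlatformAWSEKS,
-  CloudRegion("us-west-2"),
-  CloudAvailabilityZone("us-west-2b"),
-  CloudAccountID("123456789012"),
- }
-}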
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
- // [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the task
- // definition family this task definition is a member of.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for this task definition.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
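-
-// Editor's sketch (not upstream code): populating the ECS attributes above as
-// a resource detector might after querying the ECS task metadata endpoint.
-// The function name and literal values are illustrative assumptions.
-func exampleECSAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  AWSECSLaunchtypeFargate,
-  AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
-  AWSECSTaskFamily("opentelemetry-family"),
-  AWSECSTaskRevision("8"),
- }
-}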
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each writes to its own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
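-
-// Editor's sketch (not upstream code): the four log helpers above are
-// variadic and emit string-slice attributes, so several groups or streams
-// (e.g. from sidecar containers) fit in a single KeyValue. Values are
-// illustrative assumptions.
-func exampleAWSLogAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
-  AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
- }
-}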
-
-// Heroku dyno metadata
-const (
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-)
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// A container instance.
-const (
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageTagKey is the attribute Key conforming to the
- // "container.image.tag" semantic conventions. It represents the container
- // image tag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageTag returns an attribute KeyValue conforming to the
-// "container.image.tag" semantic conventions. It represents the container
-// image tag.
-func ContainerImageTag(val string) attribute.KeyValue {
- return ContainerImageTagKey.String(val)
-}
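-
-// Editor's sketch (not upstream code): a containerized process describing
-// itself with the helpers above. Values are illustrative; note that an
-// abbreviated 12-character Docker ID is acceptable per ContainerIDKey.
-func exampleContainerAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  ContainerName("opentelemetry-autoconf"),
-  ContainerID("a3bf90e006b2"),
-  ContainerRuntime("containerd"),
-  ContainerImageName("gcr.io/opentelemetry/operator"),
-  ContainerImageTag("0.1"),
- }
-}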
-
-// The software deployment.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment
-// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
-// deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// The device on which the process represented by this resource is running.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of
- // the device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// A serverless instance.
-const (
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, which will potentially be reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
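-
-// Editor's sketch (not upstream code): deriving faas.max_memory on AWS Lambda
-// per the note on FaaSMaxMemoryKey. Assumes "os" and "strconv" are imported;
-// the function name is hypothetical.
-func exampleFaaSMaxMemory() (attribute.KeyValue, bool) {
- mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
- if err != nil {
-  return attribute.KeyValue{}, false
- }
- // The environment variable reports MiB; the convention wants bytes,
- // hence the multiplication by 1,048,576.
- return FaaSMaxMemory(mb * 1024 * 1024), true
-}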
-
-// A host is defined as a general computing instance.
-const (
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the VM image ID. For Cloud, this
- // value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
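-
-// Editor's sketch (not upstream code): one plausible mapping from Go's
-// runtime.GOARCH onto the enum above. Assumes "runtime" is imported; the
-// function name is hypothetical and the mapping is not exhaustive.
-func exampleHostArch() attribute.KeyValue {
- switch runtime.GOARCH {
- case "amd64":
-  return HostArchAMD64
- case "arm":
-  return HostArchARM32
- case "arm64":
-  return HostArchARM64
- case "386":
-  return HostArchX86
- case "ppc64", "ppc64le":
-  return HostArchPPC64
- case "s390x":
-  return HostArchS390x
- default:
-  return HostArchKey.String(runtime.GOARCH)
- }
-}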
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID. For
-// Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image as defined in [Version
-// Attributes](README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// A Kubernetes Cluster.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// A Kubernetes Node object.
-const (
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// A Kubernetes Namespace.
-const (
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// A Kubernetes Pod object.
-const (
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// A container in a
-// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from the Pod specification; it must be unique within a Pod. The
- // container runtime usually uses a different, globally unique name
- // (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-)
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from the Pod specification; it must be unique within a Pod. The
-// container runtime usually uses a different, globally unique name
-// (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
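-
-// Editor's sketch (not upstream code): Kubernetes rarely exposes these fields
-// to the process directly; a common pattern is injecting them via the
-// Downward API as environment variables. The env var names below are a
-// deployment-author convention assumed here, not a Kubernetes or
-// OpenTelemetry API. Assumes "os" is imported.
-func exampleK8SAttributes() []attribute.KeyValue {
- return []attribute.KeyValue{
-  K8SNamespaceName(os.Getenv("K8S_NAMESPACE_NAME")),
-  K8SPodName(os.Getenv("K8S_POD_NAME")),
-  K8SPodUID(os.Getenv("K8S_POD_UID")),
-  K8SContainerName(os.Getenv("K8S_CONTAINER_NAME")),
- }
-}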
-
-// A Kubernetes ReplicaSet object.
-const (
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// A Kubernetes Deployment object.
-const (
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// A Kubernetes StatefulSet object.
-const (
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// A Kubernetes DaemonSet object.
-const (
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// A Kubernetes Job object.
-const (
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// A Kubernetes CronJob object.
-const (
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, e.g. as reported by `ver` or
- // `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, e.g. as reported by `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
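-
-// Editor's sketch (not upstream code): runtime.GOOS happens to use the same
-// identifiers as several os.type enum values, so a detector can map it
-// directly. Assumes "runtime" is imported; the function name is hypothetical.
-func exampleOSType() attribute.KeyValue {
- switch runtime.GOOS {
- case "windows":
-  return OSTypeWindows
- case "linux":
-  return OSTypeLinux
- case "darwin":
-  return OSTypeDarwin
- case "freebsd":
-  return OSTypeFreeBSD
- default:
-  return OSTypeKey.String(runtime.GOOS)
- }
-}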
-
-// An operating system process.
-const (
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
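-
-// Editor's sketch (not upstream code): most process attributes above map
-// directly onto the Go standard library. Assumes "os", "os/user", and
-// "path/filepath" are imported; the function name is hypothetical.
-func exampleProcessAttributes() []attribute.KeyValue {
- attrs := []attribute.KeyValue{
-  ProcessPID(os.Getpid()),
-  ProcessParentPID(os.Getppid()),
-  ProcessCommandArgs(os.Args...),
- }
- if exe, err := os.Executable(); err == nil {
-  attrs = append(attrs, ProcessExecutablePath(exe), ProcessExecutableName(filepath.Base(exe)))
- }
- if u, err := user.Current(); err == nil {
-  attrs = append(attrs, ProcessOwner(u.Username))
- }
- return attrs
-}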
-
-// The single (language) runtime instance which is monitored.
-const (
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available,
- // the value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-)
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-k8s-pod-deployment-1',
- // '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to distinguish instances of the same
- // service that exist at the same time (e.g. instances of a horizontally
- // scaled service). It is preferable for the ID to be persistent and stay
- // the same for the lifetime of the service instance, however it is
- // acceptable that the ID is ephemeral and changes during important
- // lifetime events for the service (e.g. service restarts). If the service
- // has no inherent unique ID that can be used as the value of this
- // attribute it is recommended to generate a random Version 1 or Version 4
- // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
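// Editor's sketch (not from the deleted file): how the service attributes
// above are commonly combined into an SDK resource. Assumes the
// go.opentelemetry.io/otel/sdk/resource package; the helper name is
// illustrative and the values come from the examples above.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// newServiceResource builds a Resource describing one service instance.
func newServiceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"), // required; same for all instances
		semconv.ServiceNamespace("Shop"),    // optional grouping
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
		semconv.ServiceVersion("2.0.0"),
	)
}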
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
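// Editor's sketch (not from the deleted file): the telemetry SDK attributes
// are normally populated by the SDK itself; a hand-built equivalent might look
// like this. Assumes go.opentelemetry.io/otel/attribute; the helper name is
// illustrative.
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func telemetrySDKAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.TelemetrySDKName("opentelemetry"),
		semconv.TelemetrySDKLanguageGo, // enum member, not a constructor
		semconv.TelemetrySDKVersion("1.2.3"),
	}
}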
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetryAutoVersionKey is the attribute Key conforming to the
- // "telemetry.auto.version" semantic conventions. It represents the version
- // string of the auto instrumentation agent, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-// TelemetryAutoVersion returns an attribute KeyValue conforming to the
-// "telemetry.auto.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent, if used.
-func TelemetryAutoVersion(val string) attribute.KeyValue {
- return TelemetryAutoVersionKey.String(val)
-}
-
-// Resource describing the packaged software running the application code. Web
-// engines are typically executed using process.runtime.
-const (
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
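// Editor's sketch (not from the deleted file): how a non-OTLP exporter might
// flatten an instrumentation scope into attributes using the constructors
// above. The values are the examples listed above; the helper name is
// illustrative.
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

func scopeAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.OTelScopeName("io.opentelemetry.contrib.mongodb"),
		semconv.OTelScopeVersion("1.0.0"),
	}
}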
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OTelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions. It represents the deprecated,
- // use the `otel.scope.name` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OTelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions. It represents the
- // deprecated, use the `otel.scope.version` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- OTelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OTelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions. It represents the deprecated, use
-// the `otel.scope.name` attribute.
-func OTelLibraryName(val string) attribute.KeyValue {
- return OTelLibraryNameKey.String(val)
-}
-
-// OTelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions. It represents the deprecated,
-// use the `otel.scope.version` attribute.
-func OTelLibraryVersion(val string) attribute.KeyValue {
- return OTelLibraryVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
deleted file mode 100644
index 95d0210e3..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
-const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
deleted file mode 100644
index 90b1b0452..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
+++ /dev/null
@@ -1,2599 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-)
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
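// Editor's sketch (not from the deleted file): attaching the shared exception
// attributes to a span event by hand; the SDK's RecordError helper records a
// similar "exception" event. Assumes go.opentelemetry.io/otel/trace; the
// helper name is illustrative.
package example

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func recordException(span trace.Span, err error) {
	span.AddEvent("exception", trace.WithAttributes(
		semconv.ExceptionType(fmt.Sprintf("%T", err)), // dynamic type preferred
		semconv.ExceptionMessage(err.Error()),
	))
}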
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means that two
- // distinguishable log records MUST have different values.
- // The id MAY be a [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` response, where applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` response, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // that uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // that identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // value, which describes the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// that uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// that identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// value, which describes the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
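// Editor's sketch (not from the deleted file): annotating a span for a
// CloudEvents operation with the constructors above; the values are the
// examples listed above and the helper name is illustrative.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateCloudEvent(span trace.Span) {
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"), // required
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),   // required
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
		semconv.CloudeventsEventSubject("mynewfile.jpg"),
	)
}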
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The attributes used to perform database client calls.
-const (
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the name of the database being accessed. For
- // commands that switch the database, this should be set to the target
- // database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable.)
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: Recommended (Should be collected by default only if
- // there is sanitization that excludes sensitive information.)
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- DBStatementKey = attribute.Key("db.statement")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If `db.statement` is not
- // applicable.)
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the name of the database being accessed. For
-// commands that switch the database, this should be set to the target database
-// (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
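// Editor's sketch (not from the deleted file): a typical attribute set for a
// database client span, using the example values above. Per the guidance on
// db.statement, only record it when it is sanitized. The helper name is
// illustrative.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateDBSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemPostgreSQL, // enum member; db.system is required
		semconv.DBName("customers"),
		semconv.DBUser("readonly_user"),
		semconv.DBOperation("SELECT"),
		semconv.DBStatement("SELECT * FROM wuser_table"), // only if sanitized
	)
}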
-// Connection-level attributes for Microsoft SQL Server
-const (
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // being connected to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
- // longer required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// being connected to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// Call-level attributes for Cassandra
-const (
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary table that the operation is acting upon, including the keyspace
- // name (if applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary table that the operation is acting upon, including the keyspace name
-// (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
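// Editor's sketch (not from the deleted file): Cassandra call-level attributes
// on a client span, combining the enum members and constructors above; values
// are the examples listed above and the helper name is illustrative.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateCassandraSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemCassandra,
		semconv.DBCassandraConsistencyLevelLocalQuorum,
		semconv.DBCassandraPageSize(5000),
		semconv.DBCassandraIdempotence(true),
		semconv.DBCassandraCoordinatorDC("us-west-2"),
	)
}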
-// Call-level attributes for Redis
-const (
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions. It represents the index
- // of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To
- // be used instead of the generic `db.name` attribute.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If other than the default
- // database (`0`).)
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
- return DBRedisDBIndexKey.Int(val)
-}
-
-// Call-level attributes for MongoDB
-const (
- // DBMongoDBCollectionKey is the attribute Key conforming to the
- // "db.mongodb.collection" semantic conventions. It represents the
- // collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the collection
-// being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
- return DBMongoDBCollectionKey.String(val)
-}
-
-// Call-level attributes for SQL databases
-const (
- // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
- // semantic conventions. It represents the name of the primary table that
- // the operation is acting upon, including the database name (if
- // applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting
- // upon an anonymous table, or more than one table, this value MUST NOT be
- // set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
- return DBSQLTableKey.String(val)
-}
-
-// Call-level attributes for Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // Cosmos DB operation type.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (when performing one of the
- // operations in this list)
- // Stability: stable
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
- // default))
- // Stability: stable
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBContainerKey is the attribute Key conforming to the
- // "db.cosmosdb.container" semantic conventions. It represents the cosmos
- // DB container name.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if available)
- // Stability: stable
- // Examples: 'anystring'
- DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (if response was received)
- // Stability: stable
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (when response was received and
- // contained sub-code.)
- // Stability: stable
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: ConditionallyRequired (when available)
- // Stability: stable
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBContainer returns an attribute KeyValue conforming to the
-// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
-// container name.
-func DBCosmosDBContainer(val string) attribute.KeyValue {
- return DBCosmosDBContainerKey.String(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
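// Editor's sketch (not from the deleted file): Cosmos DB call-level attributes
// on a client span, using the example values above; the helper name is
// illustrative.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateCosmosSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemCosmosDB,
		semconv.DBCosmosDBConnectionModeGateway,
		semconv.DBCosmosDBOperationTypeQuery,
		semconv.DBCosmosDBContainer("anystring"),
		semconv.DBCosmosDBStatusCode(200),
		semconv.DBCosmosDBRequestCharge(46.18),
	)
}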
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
- //
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that corresponding incoming would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
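// Editor's sketch (not from the deleted file): the incoming (server/consumer)
// side of a FaaS invocation, where faas.trigger MUST be set per the note
// above; the helper name is illustrative.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

func annotateFaaSSpan(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSTriggerHTTP, // trigger type for an inbound HTTP request
		semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
	)
}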
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 this corresponds to the bucket name, and
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
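A short sketch of attaching the datasource-trigger attributes above to an already-started span; the helper name is hypothetical and the values are taken from the examples in the comments.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordDocumentTrigger marks a span as a datasource-triggered invocation.
// Only the collection is Required; the rest are Optional.
func recordDocumentTrigger(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSTriggerDatasource,
		semconv.FaaSDocumentCollection("myBucketName"),
		semconv.FaaSDocumentOperationInsert,
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
		semconv.FaaSDocumentName("myFile.txt"),
	)
}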
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
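Correspondingly for the timer trigger, a hypothetical helper under the same import assumptions might set:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordTimerTrigger tags a scheduled invocation with its fire time and
// cron schedule, mirroring the examples above.
func recordTimerTrigger(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSTriggerTimer,
		semconv.FaaSTime("2020-01-23T13:47:06Z"),
		semconv.FaaSCron("0/5 * * * ? *"),
	)
}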
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (For some cloud providers, like
- // AWS or GCP, the region in which a function is hosted is essential to
- // uniquely identify the function and also part of its endpoint. Since it's
- // part of the endpoint being called, the region is always known to
- // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
- // If the region is unknown to the client or not required for identifying
- // the invoked function, setting `faas.invoked_region` is optional.)
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
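On the outgoing side, a client span carries the Required invoked_name/invoked_provider pair plus the region where applicable. A sketch under the same assumed import path:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// invoke wraps an outgoing FaaS call in a client span.
func invoke(ctx context.Context) {
	_, span := otel.Tracer("faas-example").Start(ctx, "invoke my-function",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.FaaSInvokedName("my-function"),
			semconv.FaaSInvokedProviderAWS,
			semconv.FaaSInvokedRegion("eu-central-1"),
		),
	)
	defer span.End()
}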
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](../../resource/semantic_conventions/README.md#service)
- // of the remote service. SHOULD be equal to the actual `service.name`
- // resource attribute of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](../../resource/semantic_conventions/README.md#service) of
-// the remote service. SHOULD be equal to the actual `service.name` resource
-// attribute of the remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
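Usage is a one-liner on a client span; the helper below is hypothetical.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// tagRemoteService labels a client span with the logical name of the
// service it calls.
func tagRemoteService(span trace.Span) {
	span.SetAttributes(semconv.PeerService("AuthTokenCache"))
}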
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under, extracted from the token or application
- // security context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses, extracted from the token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses, extracted from the token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
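A sketch of recording all three enduser attributes from an authenticated request; the values mirror the examples above and the helper is hypothetical.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordEnduser attaches identity, role, and scope extracted from the
// access token or application security context.
func recordEnduser(span trace.Span) {
	span.SetAttributes(
		semconv.EnduserID("username"),
		semconv.EnduserRole("admin"),
		semconv.EnduserScope("read:message, write:files"),
	)
}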
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-)
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
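One plausible way to populate the code.* attributes in Go is from the caller's stack frame. This sketch is an assumption, not this package's prescribed method: it places Go's fully qualified function name into `code.function` rather than splitting out `code.namespace`.

package example

import (
	"runtime"

	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordCodeLocation fills code.function, code.filepath, and code.lineno
// from the immediate caller's frame.
func recordCodeLocation(span trace.Span) {
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return
	}
	if fn := runtime.FuncForPC(pc); fn != nil {
		span.SetAttributes(semconv.CodeFunction(fn.Name()))
	}
	span.SetAttributes(
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
	)
}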
-// Semantic Convention for HTTP Client
-const (
- // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
- // conventions. It represents the full HTTP request URL in the form
- // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
- // not transmitted over HTTP, but if it is known, it should be included
- // nevertheless.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the
- // attribute's value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
-
- // HTTPResendCountKey is the attribute Key conforming to the
- // "http.resend_count" semantic conventions. It represents the ordinal
- // number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Recommended (if and only if request was retried.)
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Service Unavailable,
- // network issues, or any other).
- HTTPResendCountKey = attribute.Key("http.resend_count")
-)
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions. It represents the full HTTP request URL in the form
-// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
-// transmitted over HTTP, but if it is known, it should be included
-// nevertheless.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPResendCount returns an attribute KeyValue conforming to the
-// "http.resend_count" semantic conventions. It represents the ordinal number
-// of request resending attempt (for any reason, including redirects).
-func HTTPResendCount(val int) attribute.KeyValue {
- return HTTPResendCountKey.Int(val)
-}
-
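A sketch of a client span honoring both conventions, including the "recommended iff retried" requirement level on `http.resend_count`; the function and tracer names are illustrative.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// startClientSpan records the full (credential-free) URL, and the resend
// count only when the request has actually been retried.
func startClientSpan(ctx context.Context, url string, attempt int) (context.Context, trace.Span) {
	opts := []trace.SpanStartOption{
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(semconv.HTTPURL(url)),
	}
	if attempt > 0 {
		opts = append(opts, trace.WithAttributes(semconv.HTTPResendCount(attempt)))
	}
	return otel.Tracer("http-example").Start(ctx, "HTTP GET", opts...)
}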
-// Semantic Convention for HTTP Server
-const (
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions. It represents the full request target as passed in
- // an HTTP request line or equivalent.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '/users/12314/?q=ddds'
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
- // semantic conventions. It represents the IP address of the original
- // client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.sock.peer.addr`, which
- // would identify the network-level peer, which may be a proxy.
- //
- // This attribute should be set when a source of information different
- // from the one used for `net.sock.peer.addr` is available, even if that
- // other source just confirms the same value as `net.sock.peer.addr`.
- // Rationale: For `net.sock.peer.addr`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions. It represents the full request target as passed in
-// an HTTP request line or equivalent.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPClientIP returns an attribute KeyValue conforming to the
-// "http.client_ip" semantic conventions. It represents the IP address of the
-// original client behind all proxies, if known (e.g. from
-// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
-func HTTPClientIP(val string) attribute.KeyValue {
- return HTTPClientIPKey.String(val)
-}
-
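Server-side, the same pattern applies; the guard on the client IP reflects its Optional requirement level (helper hypothetical).

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordServerRequest sets the request target and, when a proxy header
// such as X-Forwarded-For yielded one, the original client IP.
func recordServerRequest(span trace.Span, target, clientIP string) {
	span.SetAttributes(semconv.HTTPTarget(target))
	if clientIP != "" {
		span.SetAttributes(semconv.HTTPClientIP(clientIP))
	}
}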
-// The `aws` conventions apply to operations using the AWS SDK. They map
-// request or response parameters in AWS SDK API calls to attributes on a Span.
-// The conventions have been collected over time based on feedback from AWS
-// users of tracing and will continue to evolve as new interesting conventions
-// are found.
-// Some descriptions are also provided for populating general OpenTelemetry
-// semantic conventions based on these APIs.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
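A sketch of tagging a DynamoDB Query span with these helpers; note that the string-slice helpers such as AWSDynamoDBTableNames are variadic. The values come from the examples above, and the helper name is hypothetical.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordDynamoDBQuery mixes request parameters and the response request ID.
func recordDynamoDBQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSRequestID("79b9da39-b7ae-508a-a6bc-864b2829c622"),
		semconv.AWSDynamoDBTableNames("Users"), // variadic: ...string
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBConsistentRead(true),
	)
}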
-// DynamoDB.CreateTable
-const (
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
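And for a segmented Scan, mixing request and response parameters (sketch; values from the examples above):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordDynamoDBScan tags one segment of a parallel scan.
func recordDynamoDBScan(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBSegment(10),       // request
		semconv.AWSDynamoDBTotalSegments(100), // request
		semconv.AWSDynamoDBCount(10),         // response
		semconv.AWSDynamoDBScannedCount(50),  // response
	)
}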
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in
- // the `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
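For S3, an upload-part call would combine the bucket, key, upload ID, and part number; a sketch with the example values above (helper hypothetical):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordS3UploadPart tags a span for one part of a multipart upload.
func recordS3UploadPart(span trace.Span) {
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(3456),
	)
}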
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
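A sketch of annotating a GraphQL execution span; the document value repeats the (sanitized) example above.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordGraphQLOperation sets the operation type enum, name, and document.
func recordGraphQLOperation(span trace.Span) {
	span.SetAttributes(
		semconv.GraphqlOperationTypeQuery,
		semconv.GraphqlOperationName("findBookByID"),
		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
	)
}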
-// General attributes used in messaging systems.
-const (
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents a string
- // identifying the messaging system.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
-
- // MessagingOperationKey is the attribute Key conforming to the
- // "messaging.operation" semantic conventions. It represents a string
- // identifying the kind of messaging operation as defined in the [Operation
- // names](#operation-names) section above.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationKey = attribute.Key("messaging.operation")
-
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the span describes an
- // operation on a batch of messages.)
- // Stability: stable
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-)
-
-var (
- // publish
- MessagingOperationPublish = MessagingOperationKey.String("publish")
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// MessagingSystem returns an attribute KeyValue conforming to the
-// "messaging.system" semantic conventions. It represents a string identifying
-// the messaging system.
-func MessagingSystem(val string) attribute.KeyValue {
- return MessagingSystemKey.String(val)
-}
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
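A sketch that respects the note on `messaging.batch.message_count` (set only for batch operations); the helper name is hypothetical.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordBatchReceive tags a receive span, adding the batch count only when
// the operation actually handled more than one message.
func recordBatchReceive(span trace.Span, n int) {
	span.SetAttributes(
		semconv.MessagingSystem("kafka"),
		semconv.MessagingOperationReceive,
	)
	if n > 1 {
		span.SetAttributes(semconv.MessagingBatchMessageCount(n))
	}
}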
-// Semantic convention for a consumer of messages received from a messaging
-// system
-const (
- // MessagingConsumerIDKey is the attribute Key conforming to the
- // "messaging.consumer.id" semantic conventions. It represents the
- // identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
- // both are present, or only `messaging.kafka.consumer.group`. For brokers,
- // such as RabbitMQ and Artemis, set it to the `client_id` of the client
- // consuming the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
-)
-
-// MessagingConsumerID returns an attribute KeyValue conforming to the
-// "messaging.consumer.id" semantic conventions. It represents the identifier
-// for the consumer receiving a message. For Kafka, set it to
-// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
-// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
-// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
-// message.
-func MessagingConsumerID(val string) attribute.KeyValue {
- return MessagingConsumerIDKey.String(val)
-}
-
-// Semantic conventions for remote procedure calls.
-const (
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCSystemKey = attribute.Key("rpc.system")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, which must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
- // called, which must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
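The same pattern applies to the RPC attributes: the span name is conventionally `$service/$method`, with `rpc.method` matching the `$method` part as the doc comment requires. A sketch under the same assumptions as above (pre-configured tracer; service and method names mirror the examples in the doc comments):

// traceUnaryCall wraps a client-side RPC in a span; the names are
// hypothetical.
func traceUnaryCall(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/exampleMethod",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.RPCSystemGRPC,                       // rpc.system enum member
			semconv.RPCService("myservice.EchoService"), // logical service name
			semconv.RPCMethod("exampleMethod"),          // must equal $method in the span name
		))
	defer span.End()
}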
-// Tech-specific attributes for gRPC.
-const (
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
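Because the enum members above are `.Int(n)` values matching gRPC's numeric codes, the status can be recorded directly from `google.golang.org/grpc/status` without a lookup table. A sketch:

import (
	"google.golang.org/grpc/status"

	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// recordGRPCStatus sets rpc.grpc.status_code from an RPC error;
// status.Code(nil) returns codes.OK (0), so this also covers success.
func recordGRPCStatus(span trace.Span, err error) {
	span.SetAttributes(semconv.RPCGRPCStatusCodeKey.Int(int(status.Code(err))))
}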
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version, as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0
- // does not specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If other than the default
- // version (`1.0`))
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of the request or response. Since the protocol allows the id to
- // be an int, string, `null`, or missing (for notifications), the value is
- // expected to be cast to a string for simplicity. Use an empty string in
- // case of a `null` value. Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If response is not successful.)
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version, as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0
-// does not specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of the request or response. Since the protocol allows the id to be
- // an int, string, `null`, or missing (for notifications), the value is
- // expected to be cast to a string for simplicity. Use an empty string in case
- // of a `null` value. Omit entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
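The `rpc.jsonrpc.request_id` rule above (stringify ints and strings, empty string for `null`, omit for notifications) is easy to get wrong. A small hedged helper, assuming the id was decoded with `encoding/json` into an `any`:

import (
	"fmt"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
)

// jsonrpcRequestIDAttr normalizes a decoded JSON-RPC id per the rule in
// the doc comment; ok=false means the attribute should be omitted.
func jsonrpcRequestIDAttr(id any, present bool) (kv attribute.KeyValue, ok bool) {
	if !present {
		return attribute.KeyValue{}, false // notification: omit entirely
	}
	switch v := id.(type) {
	case nil:
		return semconv.RPCJsonrpcRequestID(""), true // `null` id
	case string:
		return semconv.RPCJsonrpcRequestID(v), true
	case float64: // encoding/json decodes JSON numbers as float64
		return semconv.RPCJsonrpcRequestID(strconv.FormatFloat(v, 'f', -1, 64)), true
	default:
		return semconv.RPCJsonrpcRequestID(fmt.Sprint(v)), true
	}
}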
-
-// Tech-specific attributes for Connect RPC.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If response is not successful
- // and if error code available.)
- // Stability: stable
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
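As with gRPC, instrumentations record one of the predefined members rather than a free-form string; how a concrete framework error maps to a member is application-specific. A minimal sketch pairing the attribute with OTel span status (the error classification here is an assumption, not part of the convention):

import (
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// recordConnectUnavailable marks a failed Connect call; setting span
// status alongside rpc.connect_rpc.error_code is conventional.
func recordConnectUnavailable(span trace.Span) {
	span.SetAttributes(semconv.RPCConnectRPCErrorCodeUnavailable)
	span.SetStatus(codes.Error, "unavailable")
}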
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/MIGRATION.md
deleted file mode 100644
index 8a11ea28d..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/MIGRATION.md
+++ /dev/null
@@ -1,155 +0,0 @@
-# Semantic Convention Changes
-
-The `go.opentelemetry.io/otel/semconv/v1.30.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.28.0`, with the exceptions listed below (a minimal upgrade sketch follows the list).
-
-Note: `go.opentelemetry.io/otel/semconv/v1.29.0` does not exist due to bugs in the upstream [OpenTelemetry Semantic Conventions] release.
-
-## Dropped deprecations
-
-The following declarations have been deprecated in the [OpenTelemetry Semantic Conventions].
-Refer to the respective documentation in that repository for deprecation instructions for each type.
-
-- `CodeColumn`
-- `CodeColumnKey`
-- `CodeFunction`
-- `CodeFunctionKey`
-- `DBCassandraConsistencyLevelAll`
-- `DBCassandraConsistencyLevelAny`
-- `DBCassandraConsistencyLevelEachQuorum`
-- `DBCassandraConsistencyLevelKey`
-- `DBCassandraConsistencyLevelLocalOne`
-- `DBCassandraConsistencyLevelLocalQuorum`
-- `DBCassandraConsistencyLevelLocalSerial`
-- `DBCassandraConsistencyLevelOne`
-- `DBCassandraConsistencyLevelQuorum`
-- `DBCassandraConsistencyLevelSerial`
-- `DBCassandraConsistencyLevelThree`
-- `DBCassandraConsistencyLevelTwo`
-- `DBCassandraCoordinatorDC`
-- `DBCassandraCoordinatorDCKey`
-- `DBCassandraCoordinatorID`
-- `DBCassandraCoordinatorIDKey`
-- `DBCassandraIdempotence`
-- `DBCassandraIdempotenceKey`
-- `DBCassandraPageSize`
-- `DBCassandraPageSizeKey`
-- `DBCassandraSpeculativeExecutionCount`
-- `DBCassandraSpeculativeExecutionCountKey`
-- `DBCosmosDBClientID`
-- `DBCosmosDBClientIDKey`
-- `DBCosmosDBConnectionModeDirect`
-- `DBCosmosDBConnectionModeGateway`
-- `DBCosmosDBConnectionModeKey`
-- `DBCosmosDBOperationTypeBatch`
-- `DBCosmosDBOperationTypeCreate`
-- `DBCosmosDBOperationTypeDelete`
-- `DBCosmosDBOperationTypeExecute`
-- `DBCosmosDBOperationTypeExecuteJavascript`
-- `DBCosmosDBOperationTypeHead`
-- `DBCosmosDBOperationTypeHeadFeed`
-- `DBCosmosDBOperationTypeInvalid`
-- `DBCosmosDBOperationTypeKey`
-- `DBCosmosDBOperationTypePatch`
-- `DBCosmosDBOperationTypeQuery`
-- `DBCosmosDBOperationTypeQueryPlan`
-- `DBCosmosDBOperationTypeRead`
-- `DBCosmosDBOperationTypeReadFeed`
-- `DBCosmosDBOperationTypeReplace`
-- `DBCosmosDBOperationTypeUpsert`
-- `DBCosmosDBRequestCharge`
-- `DBCosmosDBRequestChargeKey`
-- `DBCosmosDBRequestContentLength`
-- `DBCosmosDBRequestContentLengthKey`
-- `DBCosmosDBSubStatusCode`
-- `DBCosmosDBSubStatusCodeKey`
-- `DBElasticsearchNodeName`
-- `DBElasticsearchNodeNameKey`
-- `DBSystemAdabas`
-- `DBSystemCache`
-- `DBSystemCassandra`
-- `DBSystemClickhouse`
-- `DBSystemCloudscape`
-- `DBSystemCockroachdb`
-- `DBSystemColdfusion`
-- `DBSystemCosmosDB`
-- `DBSystemCouchDB`
-- `DBSystemCouchbase`
-- `DBSystemDb2`
-- `DBSystemDerby`
-- `DBSystemDynamoDB`
-- `DBSystemEDB`
-- `DBSystemElasticsearch`
-- `DBSystemFilemaker`
-- `DBSystemFirebird`
-- `DBSystemFirstSQL`
-- `DBSystemGeode`
-- `DBSystemH2`
-- `DBSystemHBase`
-- `DBSystemHSQLDB`
-- `DBSystemHanaDB`
-- `DBSystemHive`
-- `DBSystemInfluxdb`
-- `DBSystemInformix`
-- `DBSystemIngres`
-- `DBSystemInstantDB`
-- `DBSystemInterbase`
-- `DBSystemIntersystemsCache`
-- `DBSystemKey`
-- `DBSystemMSSQL`
-- `DBSystemMariaDB`
-- `DBSystemMaxDB`
-- `DBSystemMemcached`
-- `DBSystemMongoDB`
-- `DBSystemMssqlcompact`
-- `DBSystemMySQL`
-- `DBSystemNeo4j`
-- `DBSystemNetezza`
-- `DBSystemOpensearch`
-- `DBSystemOracle`
-- `DBSystemOtherSQL`
-- `DBSystemPervasive`
-- `DBSystemPointbase`
-- `DBSystemPostgreSQL`
-- `DBSystemProgress`
-- `DBSystemRedis`
-- `DBSystemRedshift`
-- `DBSystemSpanner`
-- `DBSystemSqlite`
-- `DBSystemSybase`
-- `DBSystemTeradata`
-- `DBSystemTrino`
-- `DBSystemVertica`
-- `EventName`
-- `EventNameKey`
-- `ExceptionEscaped`
-- `ExceptionEscapedKey`
-- `GenAIOpenaiRequestSeed`
-- `GenAIOpenaiRequestSeedKey`
-- `ProcessExecutableBuildIDProfiling`
-- `ProcessExecutableBuildIDProfilingKey`
-- `SystemNetworkStateClose`
-- `SystemNetworkStateCloseWait`
-- `SystemNetworkStateClosing`
-- `SystemNetworkStateDelete`
-- `SystemNetworkStateEstablished`
-- `SystemNetworkStateFinWait1`
-- `SystemNetworkStateFinWait2`
-- `SystemNetworkStateKey`
-- `SystemNetworkStateLastAck`
-- `SystemNetworkStateListen`
-- `SystemNetworkStateSynRecv`
-- `SystemNetworkStateSynSent`
-- `SystemNetworkStateTimeWait`
-- `VCSRepositoryChangeID`
-- `VCSRepositoryChangeIDKey`
-- `VCSRepositoryChangeTitle`
-- `VCSRepositoryChangeTitleKey`
-- `VCSRepositoryRefName`
-- `VCSRepositoryRefNameKey`
-- `VCSRepositoryRefRevision`
-- `VCSRepositoryRefRevisionKey`
-- `VCSRepositoryRefTypeBranch`
-- `VCSRepositoryRefTypeKey`
-- `VCSRepositoryRefTypeTag`
-
-[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
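Since the package is a drop-in replacement apart from the identifiers listed above, the upgrade is normally just an import-path bump; any use of a dropped identifier must be removed or replaced per the upstream deprecation notes. A hedged sketch (`ServiceName` is a helper that exists in both versions):

// Before:
//   import semconv "go.opentelemetry.io/otel/semconv/v1.28.0"
// After:
import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
)

var attrs = []attribute.KeyValue{
	semconv.ServiceName("checkout"), // unaffected helpers compile unchanged
}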
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/README.md
deleted file mode 100644
index 072ea6928..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.30.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.30.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.30.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/attribute_group.go
deleted file mode 100644
index 60f3df0db..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.30.0/attribute_group.go
+++ /dev/null
@@ -1,12333 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.30.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Namespace: android
-const (
- // AndroidOSAPILevelKey is the attribute Key conforming to the
- // "android.os.api_level" semantic conventions. It represents the uniquely
- // identifies the framework API revision offered by a version (`os.version`) of
- // the android operating system. More information can be found [here].
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "33", "32"
- //
- // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
- AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found [here].
-//
-// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
-func AndroidOSAPILevel(val string) attribute.KeyValue {
- return AndroidOSAPILevelKey.String(val)
-}
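Attributes like this one are typically resource-level rather than span-level. A minimal sketch attaching the API level to an SDK `Resource` (the "33" value mirrors the example above; `SchemaURL` is the schema URL exported by this semconv package):

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
)

// androidResource carries android.os.api_level as a resource attribute.
var androidResource = resource.NewWithAttributes(
	semconv.SchemaURL,
	semconv.AndroidOSAPILevel("33"),
)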
-
-// Namespace: artifact
-const (
- // ArtifactAttestationFilenameKey is the attribute Key conforming to the
- // "artifact.attestation.filename" semantic conventions. It represents the
- // provenance filename of the built attestation which directly relates to the
- // build artifact filename. This filename SHOULD accompany the artifact at
- // publish time. See the [SLSA Relationship] specification for more information.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "golang-binary-amd64-v0.1.0.attestation",
- // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation",
- // "file-name-package.tar.gz.intoto.json1"
- //
- // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
- ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename")
-
- // ArtifactAttestationHashKey is the attribute Key conforming to the
- // "artifact.attestation.hash" semantic conventions. It represents the full
- // [hash value (see glossary)], of the built attestation. Some envelopes in the
- // [software attestation space] also refer to this as the **digest**.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408"
- //
- // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
- // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
- ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash")
-
- // ArtifactAttestationIDKey is the attribute Key conforming to the
- // "artifact.attestation.id" semantic conventions. It represents the id of the
- // build [software attestation].
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "123"
- //
- // [software attestation]: https://slsa.dev/attestation-model
- ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id")
-
- // ArtifactFilenameKey is the attribute Key conforming to the
- // "artifact.filename" semantic conventions. It represents the human readable
- // file name of the artifact, typically generated during build and release
- // processes. Often includes the package name and version in the file name.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0",
- // "release-1.tar.gz", "file-name-package.tar.gz"
- // Note: This file name can also act as the [Package Name]
- // in cases where the package ecosystem maps accordingly.
- // Additionally, the artifact [can be published]
- // for others, but that is not a guarantee.
- //
- // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model
- // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain
- ArtifactFilenameKey = attribute.Key("artifact.filename")
-
- // ArtifactHashKey is the attribute Key conforming to the "artifact.hash"
- // semantic conventions. It represents the full [hash value (see glossary)],
- // often found in checksum.txt on a release of the artifact and used to verify
- // package integrity.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"
- // Note: The specific algorithm used to create the cryptographic hash value is
- // not defined. In situations where an artifact has multiple
- // cryptographic hashes, it is up to the implementer to choose which
- // hash value to set here; this should be the most secure hash algorithm
- // that is suitable for the situation and consistent with the
- // corresponding attestation. The implementer can then provide the other
- // hash values through an additional set of attribute extensions as they
- // deem necessary.
- //
- // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
- ArtifactHashKey = attribute.Key("artifact.hash")
-
- // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl"
- // semantic conventions. It represents the [Package URL] of the
- // [package artifact], which provides a standard way to identify and locate the
- // packaged artifact.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "pkg:github/package-url/purl-spec@1209109710924",
- // "pkg:npm/foo@12.12.3"
- //
- // [Package URL]: https://github.com/package-url/purl-spec
- // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
- ArtifactPurlKey = attribute.Key("artifact.purl")
-
- // ArtifactVersionKey is the attribute Key conforming to the "artifact.version"
- // semantic conventions. It represents the version of the artifact.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "v0.1.0", "1.2.1", "122691-build"
- ArtifactVersionKey = attribute.Key("artifact.version")
-)
-
-// ArtifactAttestationFilename returns an attribute KeyValue conforming to the
-// "artifact.attestation.filename" semantic conventions. It represents the
-// provenance filename of the built attestation which directly relates to the
-// build artifact filename. This filename SHOULD accompany the artifact at
-// publish time. See the [SLSA Relationship] specification for more information.
-//
-// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
-func ArtifactAttestationFilename(val string) attribute.KeyValue {
- return ArtifactAttestationFilenameKey.String(val)
-}
-
-// ArtifactAttestationHash returns an attribute KeyValue conforming to the
-// "artifact.attestation.hash" semantic conventions. It represents the full
-// [hash value (see glossary)], of the built attestation. Some envelopes in the
-// [software attestation space] also refer to this as the **digest**.
-//
-// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
-// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
-func ArtifactAttestationHash(val string) attribute.KeyValue {
- return ArtifactAttestationHashKey.String(val)
-}
-
-// ArtifactAttestationID returns an attribute KeyValue conforming to the
-// "artifact.attestation.id" semantic conventions. It represents the id of the
-// build [software attestation].
-//
-// [software attestation]: https://slsa.dev/attestation-model
-func ArtifactAttestationID(val string) attribute.KeyValue {
- return ArtifactAttestationIDKey.String(val)
-}
-
-// ArtifactFilename returns an attribute KeyValue conforming to the
-// "artifact.filename" semantic conventions. It represents the human readable
-// file name of the artifact, typically generated during build and release
-// processes. Often includes the package name and version in the file name.
-func ArtifactFilename(val string) attribute.KeyValue {
- return ArtifactFilenameKey.String(val)
-}
-
-// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash"
-// semantic conventions. It represents the full [hash value (see glossary)],
-// often found in checksum.txt on a release of the artifact and used to verify
-// package integrity.
-//
-// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
-func ArtifactHash(val string) attribute.KeyValue {
- return ArtifactHashKey.String(val)
-}
-
-// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl"
-// semantic conventions. It represents the [Package URL] of the
- // [package artifact], which provides a standard way to identify and locate the packaged
-// artifact.
-//
-// [Package URL]: https://github.com/package-url/purl-spec
-// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
-func ArtifactPurl(val string) attribute.KeyValue {
- return ArtifactPurlKey.String(val)
-}
-
-// ArtifactVersion returns an attribute KeyValue conforming to the
-// "artifact.version" semantic conventions. It represents the version of the
-// artifact.
-func ArtifactVersion(val string) attribute.KeyValue {
- return ArtifactVersionKey.String(val)
-}
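For build-pipeline spans, these helpers compose in the usual way. An illustrative sketch whose values mirror the examples above (not a prescribed attribute set):

import "go.opentelemetry.io/otel/trace"

// recordArtifact annotates a CI/CD span with artifact provenance.
func recordArtifact(span trace.Span) {
	span.SetAttributes(
		semconv.ArtifactFilename("golang-binary-amd64-v0.1.0"),
		semconv.ArtifactVersion("v0.1.0"),
		semconv.ArtifactHash("9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"),
		semconv.ArtifactAttestationFilename("golang-binary-amd64-v0.1.0.attestation"),
	)
}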
-
-// Namespace: aws
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the
- // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the
- // JSON-serialized value of each item in the `AttributeDefinitions` request
- // field.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "{ "AttributeName": "string", "AttributeType": "string" }"
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "lives", "id"
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the value
- // of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response field.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
- // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
- // "string", "WriteCapacityUnits": number }"
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of the
- // `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
- // value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Users", "CatsTable"
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
- // number } }"
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the
- // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents
- // the JSON-serialized value of each item of the `GlobalSecondaryIndexes`
- // request field.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }"
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value of
- // the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "name_to_group"
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the
- // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
- // the JSON-serialized value of the `ItemCollectionMetrics` response field.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
- // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
- // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
- // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }"
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of the
- // `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the
- // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
- // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
- // field.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes":
- // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
- // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
- // "ProjectionType": "string" } }"
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value of
- // the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems,
- // ProductReviews"
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents
- // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents
- // the value of the `ProvisionedThroughput.WriteCapacityUnits` request
- // parameter.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
- // the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
- // the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of the
- // `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of the
- // `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "ALL_ATTRIBUTES", "COUNT"
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the number of
- // items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys in
- // the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Users", "Cats"
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the value
- // of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS cluster].
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
- //
- // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container instance].
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"
- //
- // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch type]
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- //
- // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn"
- // semantic conventions. It represents the ARN of a running [ECS task].
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b",
- // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
- //
- // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the family name of
- // the [ECS task definition] used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "opentelemetry-family"
- //
- // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
- // semantic conventions. It represents the ID of a running ECS task. The ID MUST
- // be extracted from `task.arn`.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b",
- // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
- AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision for
- // the task definition used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "8", "26"
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
- // cluster.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-
- // AWSExtendedRequestIDKey is the attribute Key conforming to the
- // "aws.extended_request_id" semantic conventions. It represents the AWS
- // extended request ID as returned in the response header `x-amz-id-2`.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
- AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
-
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
- // ARN as provided on the `Context` passed to the function (the
- // `Lambda-Runtime-Invoked-Function-Arn` header on the
- // `/runtime/invocation/next` response, when applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
- // Note: This may be different from `cloud.resource_id` if an alias is involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
- // Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
- // Note: See the [log group ARN format documentation].
- //
- // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of the
- // AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "/aws/lambda/my-function", "opentelemetry-service"
- // Note: Multiple log groups must be supported for cases like multi-container
- // applications, where a single application has sidecar containers, and each
- // writes to its own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
- // AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
- // Note: See the [log stream ARN format documentation]. One log group can
- // contain several log streams, so these ARNs necessarily identify both a log
- // group and a log stream.
- //
- // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s) of the
- // AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in the
- // response headers `x-amzn-requestid`, `x-amzn-request-id` or
- // `x-amz-request-id`.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
- AWSRequestIDKey = attribute.Key("aws.request_id")
-
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request refers to.
- // Corresponds to the `--bucket` parameter of the [S3 API] operations.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "some-bucket-name"
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- //
- // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source object
- // (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "someFile.yml"
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3 API].
- // This applies in particular to the following operations:
- //
- // - [copy-object]
- // - [upload-part-copy]
- //
- //
- // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
- // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
- // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean"
- // Note: The `delete` attribute is only applicable to the [delete-object]
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3 API].
- //
- // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
- // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3 API] operations.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "someFile.yml"
- // Note: The `key` attribute is applicable to all object-related S3 operations,
- // i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // - [copy-object]
- // - [delete-object]
- // - [get-object]
- // - [head-object]
- // - [put-object]
- // - [restore-object]
- // - [select-object-content]
- // - [abort-multipart-upload]
- // - [complete-multipart-upload]
- // - [create-multipart-upload]
- // - [list-parts]
- // - [upload-part]
- // - [upload-part-copy]
- //
- //
- // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
- // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
- // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
- // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html
- // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html
- // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html
- // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html
- // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html
- // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
- // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
- // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html
- // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
- // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
- // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number of
- // the part being uploaded in a multipart-upload operation. This is a positive
- // integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the [upload-part]
- // and [upload-part-copy] operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter of
- // the
- // [upload-part operation within the S3 API].
- //
- // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
- // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
- // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id"
- // semantic conventions. It represents the upload ID that identifies the
- // multipart upload.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"
- // Note: The `upload_id` attribute applies to S3 multipart-upload operations and
- // corresponds to the `--upload-id` parameter
- // of the [S3 API] multipart operations.
- // This applies in particular to the following operations:
- //
- // - [abort-multipart-upload]
- // - [complete-multipart-upload]
- // - [list-parts]
- // - [upload-part]
- // - [upload-part-copy]
- //
- //
- // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
- // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
- // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
- // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
- // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
- // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
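S3 instrumentation is then a matter of attaching the applicable subset: the bucket for everything except `list-buckets`, the key for object-level calls, and so on. A sketch of a client span around a hypothetical GetObject call, assuming the generated `AWSS3Bucket`/`AWSS3Key` helpers defined later in this file:

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// traceGetObject opens a client span carrying the S3 attributes; the
// bucket and key values mirror the examples above.
func traceGetObject(ctx context.Context) {
	ctx, span := otel.Tracer("example").Start(ctx, "GetObject",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.AWSS3Bucket("some-bucket-name"),
			semconv.AWSS3Key("someFile.yml"),
		))
	defer span.End()
	_ = ctx // ... invoke the S3 SDK with ctx here ...
}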
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents
-// the JSON-serialized value of each item in the `AttributeDefinitions` request
-// field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the
-// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value
-// of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the
-// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
-// value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to
-// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field.
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of the
-// `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to
-// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
-// the JSON-serialized value of the `ItemCollectionMetrics` response field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to
-// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
-// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
-// field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of the
-// `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
-// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request
-// parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming
-// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It
-// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request
-// parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
-// the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the number of
-// items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the
-// `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value of
-// the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
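-// A minimal sketch (not part of the generated API) of how these DynamoDB
-// constructors are typically attached to a client span. It assumes the
-// go.opentelemetry.io/otel package is imported, ctx is an incoming
-// context.Context, and the tracer name and values are placeholders.
-//
-//	ctx, span := otel.Tracer("example").Start(ctx, "DynamoDB.Query")
-//	defer span.End()
-//	span.SetAttributes(
-//		AWSDynamoDBTableNames("users"),
-//		AWSDynamoDBIndexName("users-by-email"),
-//		AWSDynamoDBLimit(25),
-//		AWSDynamoDBScanForward(true),
-//	)
-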
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
-// [ECS cluster].
-//
-// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container instance].
-//
-// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
-// [ECS task].
-//
-// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the family name of
-// the [ECS task definition] used to create the ECS task.
-//
-// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id"
-// semantic conventions. It represents the ID of a running ECS task. The ID MUST
-// be extracted from `task.arn`.
-func AWSECSTaskID(val string) attribute.KeyValue {
- return AWSECSTaskIDKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// the task definition used to create the ECS task.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// AWSExtendedRequestID returns an attribute KeyValue conforming to the
-// "aws.extended_request_id" semantic conventions. It represents the AWS extended
-// request ID as returned in the response header `x-amz-id-2`.
-func AWSExtendedRequestID(val string) attribute.KeyValue {
- return AWSExtendedRequestIDKey.String(val)
-}
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
-// ARN as provided on the `Context` passed to the function (the
-// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
-// endpoint, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
-// AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
-// semantic conventions. It represents the AWS request ID as returned in the
-// response headers `x-amzn-requestid`, `x-amzn-request-id`, or
-// `x-amz-request-id`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
-// semantic conventions. It represents the S3 bucket name the request refers to.
-// Corresponds to the `--bucket` parameter of the [S3 API] operations.
-//
-// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object (in
-// the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
-// semantic conventions. It represents the delete request container that
-// specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
-// conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3 API] operations.
-//
-// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
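-// A minimal sketch (an illustration, not upstream code): the S3 constructors
-// above annotate a span for a GetObject-style call. span is assumed to be a
-// trace.Span started elsewhere; bucket and key values are placeholders.
-//
-//	span.SetAttributes(
-//		AWSS3Bucket("my-bucket"),
-//		AWSS3Key("reports/2024/summary.json"),
-//	)
-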
-// Enum values for aws.ecs.launchtype
-var (
- // ec2
- // Stability: development
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- // Stability: development
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
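-// A minimal sketch (assumption, not generated code) of ECS resource
-// attributes combined with the launch-type enum above, assuming the
-// go.opentelemetry.io/otel/sdk/resource package is imported as resource and
-// SchemaURL is this package's schema constant; the ARN and family name are
-// placeholders.
-//
-//	res := resource.NewWithAttributes(
-//		SchemaURL,
-//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789012:cluster/demo"),
-//		AWSECSTaskFamily("demo-task"),
-//		AWSECSLaunchtypeFargate,
-//	)
-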
-// Namespace: az
-const (
- // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic
- // conventions. It represents the [Azure Resource Provider Namespace] as
- // recognized by the client.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
- //
- // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
- AzNamespaceKey = attribute.Key("az.namespace")
-
- // AzServiceRequestIDKey is the attribute Key conforming to the
- // "az.service_request_id" semantic conventions. It represents the unique
- // identifier of the service request. It's generated by the Azure service and
- // returned with the response.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "00000000-0000-0000-0000-000000000000"
- AzServiceRequestIDKey = attribute.Key("az.service_request_id")
-)
-
-// AzNamespace returns an attribute KeyValue conforming to the "az.namespace"
-// semantic conventions. It represents the [Azure Resource Provider Namespace] as
-// recognized by the client.
-//
-// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
-func AzNamespace(val string) attribute.KeyValue {
- return AzNamespaceKey.String(val)
-}
-
-// AzServiceRequestID returns an attribute KeyValue conforming to the
-// "az.service_request_id" semantic conventions. It represents the unique
-// identifier of the service request. It's generated by the Azure service and
-// returned with the response.
-func AzServiceRequestID(val string) attribute.KeyValue {
- return AzServiceRequestIDKey.String(val)
-}
-
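-// A minimal sketch (illustrative only): attaching the az.* attributes above
-// to a span for an Azure SDK call. span is assumed to be a trace.Span started
-// elsewhere, and the request ID is a placeholder.
-//
-//	span.SetAttributes(
-//		AzNamespace("Microsoft.Storage"),
-//		AzServiceRequestID("00000000-0000-0000-0000-000000000000"),
-//	)
-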
-// Namespace: azure
-const (
- // AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
- // semantic conventions. It represents the unique identifier of the client
- // instance.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1"
- AzureClientIDKey = attribute.Key("azure.client.id")
-
- // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "azure.cosmosdb.connection.mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode")
-
- // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the
- // "azure.cosmosdb.consistency.level" semantic conventions. It represents the
- // account or request [consistency level].
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong",
- // "Session"
- //
- // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
- AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level")
-
- // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to
- // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It
-	// represents the list of regions contacted during the operation, in the
-	// order in which they were contacted. If more than one region is listed,
-	// the operation was performed across multiple regions, i.e. a
-	// cross-regional call.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "North Central US", "Australia East", "Australia Southeast"
- // Note: Region name matches the format of `displayName` in [Azure Location API]
- //
- // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location
- AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions")
-
- // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the
- // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents
- // the number of request units consumed by the operation.
- //
- // Type: double
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 46.18, 1.0
- AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge")
-
- // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the
- // "azure.cosmosdb.request.body.size" semantic conventions. It represents the
- // request payload size in bytes.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size")
-
- // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the
- // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents
- // the cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 1000, 1002
- AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code")
-)
-
-// AzureClientID returns an attribute KeyValue conforming to the
-// "azure.client.id" semantic conventions. It represents the unique identifier of
-// the client instance.
-func AzureClientID(val string) attribute.KeyValue {
- return AzureClientIDKey.String(val)
-}
-
-// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue
-// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic
-// conventions. It represents the list of regions contacted during the
-// operation, in the order in which they were contacted. If more than one
-// region is listed, the operation was performed across multiple regions, i.e.
-// a cross-regional call.
-func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
- return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val)
-}
-
-// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming
-// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It
-// represents the number of request units consumed by the operation.
-func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue {
- return AzureCosmosDBOperationRequestChargeKey.Float64(val)
-}
-
-// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the
-// "azure.cosmosdb.request.body.size" semantic conventions. It represents the
-// request payload size in bytes.
-func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue {
- return AzureCosmosDBRequestBodySizeKey.Int(val)
-}
-
-// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to
-// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It
-// represents the cosmos DB sub status code.
-func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
- return AzureCosmosDBResponseSubStatusCodeKey.Int(val)
-}
-
-// Enum values for azure.cosmosdb.connection.mode
-var (
- // Gateway (HTTP) connection.
- // Stability: development
- AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway")
- // Direct connection.
- // Stability: development
- AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct")
-)
-
-// Enum values for azure.cosmosdb.consistency.level
-var (
- // strong
- // Stability: development
- AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong")
- // bounded_staleness
- // Stability: development
- AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness")
- // session
- // Stability: development
- AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session")
- // eventual
- // Stability: development
- AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual")
- // consistent_prefix
- // Stability: development
- AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix")
-)
-
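-// A minimal sketch (assumption, not upstream code) combining the Cosmos DB
-// attribute constructors with the enum members above on a database client
-// span; span is assumed to exist and the numeric values are placeholders.
-//
-//	span.SetAttributes(
-//		AzureCosmosDBConnectionModeDirect,
-//		AzureCosmosDBConsistencyLevelSession,
-//		AzureCosmosDBOperationRequestCharge(46.18),
-//		AzureCosmosDBResponseSubStatusCode(1000),
-//	)
-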
-// Namespace: browser
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space.
- //
- // Type: string[]
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99"
- // Note: This value is intended to be taken from the [UA client hints API] (
- // `navigator.userAgentData.brands`).
- //
- // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserLanguageKey is the attribute Key conforming to the "browser.language"
- // semantic conventions. It represents the preferred language of the user using
- // the browser.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "en", "en-US", "fr", "fr-FR"
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the browser is
- // running on a mobile device.
- //
- // Type: boolean
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // Note: This value is intended to be taken from the [UA client hints API] (
- // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
- // left unset.
- //
- // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserPlatformKey is the attribute Key conforming to the "browser.platform"
- // semantic conventions. It represents the platform on which the browser is
- // running.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Windows", "macOS", "Android"
- // Note: This value is intended to be taken from the [UA client hints API] (
- // `navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
- // be left unset in order for the values to be consistent.
- // The list of possible values is defined in the
- // [W3C User-Agent Client Hints specification]. Note that some (but not all) of
- // these values can overlap with values in the
- // [`os.type` and `os.name` attributes]. However, for consistency, the values in
- // the `browser.platform` attribute should capture the exact value that the user
- // agent provides.
- //
- // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
- // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform
- // [`os.type` and `os.name` attributes]: ./os.md
- BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands"
-// semantic conventions. It represents the array of brand name and version
-// separated by a space.
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred language
-// of the user using the browser.
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile"
-// semantic conventions. It represents a boolean that is true if the browser is
-// running on a mobile device.
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running.
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
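-// A minimal sketch (illustrative only): browser attributes describe the
-// telemetry-producing entity, so they fit a resource. It assumes the
-// go.opentelemetry.io/otel/sdk/resource package is imported as resource and
-// SchemaURL is this package's schema constant; values mirror the examples
-// above.
-//
-//	res := resource.NewWithAttributes(
-//		SchemaURL,
-//		BrowserBrands("Chromium 99", "Chrome 99"),
-//		BrowserLanguage("en-US"),
-//		BrowserMobile(false),
-//		BrowserPlatform("macOS"),
-//	)
-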
-// Namespace: cassandra
-const (
- // CassandraConsistencyLevelKey is the attribute Key conforming to the
- // "cassandra.consistency.level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from [CQL].
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- //
- // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
- CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
-
- // CassandraCoordinatorDCKey is the attribute Key conforming to the
- // "cassandra.coordinator.dc" semantic conventions. It represents the data
- // center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: us-west-2
- CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
-
- // CassandraCoordinatorIDKey is the attribute Key conforming to the
- // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
- // coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
- CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
-
- // CassandraPageSizeKey is the attribute Key conforming to the
- // "cassandra.page.size" semantic conventions. It represents the fetch size used
- // for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 5000
- CassandraPageSizeKey = attribute.Key("cassandra.page.size")
-
- // CassandraQueryIdempotentKey is the attribute Key conforming to the
- // "cassandra.query.idempotent" semantic conventions. It represents the whether
- // or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
-
- // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
- // "cassandra.speculative_execution.count" semantic conventions. It represents
- // the number of times a query was speculatively executed. Not set or `0` if the
- // query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 0, 2
- CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
-)
-
-// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "cassandra.coordinator.dc" semantic conventions. It represents the data center
-// of the coordinating node for a query.
-func CassandraCoordinatorDC(val string) attribute.KeyValue {
- return CassandraCoordinatorDCKey.String(val)
-}
-
-// CassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
-// coordinating node for a query.
-func CassandraCoordinatorID(val string) attribute.KeyValue {
- return CassandraCoordinatorIDKey.String(val)
-}
-
-// CassandraPageSize returns an attribute KeyValue conforming to the
-// "cassandra.page.size" semantic conventions. It represents the fetch size used
-// for paging, i.e. how many rows will be returned at once.
-func CassandraPageSize(val int) attribute.KeyValue {
- return CassandraPageSizeKey.Int(val)
-}
-
-// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
-// "cassandra.query.idempotent" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func CassandraQueryIdempotent(val bool) attribute.KeyValue {
- return CassandraQueryIdempotentKey.Bool(val)
-}
-
-// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
-// the "cassandra.speculative_execution.count" semantic conventions. It
-// represents the number of times a query was speculatively executed. Not set or
-// `0` if the query was not executed speculatively.
-func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return CassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// Enum values for cassandra.consistency.level
-var (
- // all
- // Stability: development
- CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
- // each_quorum
- // Stability: development
- CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- // Stability: development
- CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- // Stability: development
- CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
- // one
- // Stability: development
- CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
- // two
- // Stability: development
- CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
- // three
- // Stability: development
- CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
- // local_one
- // Stability: development
- CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
- // any
- // Stability: development
- CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
- // serial
- // Stability: development
- CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
- // local_serial
- // Stability: development
- CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
-)
-
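-// A minimal sketch (assumption): annotating a Cassandra query span with the
-// constructors and consistency-level enum above; span is assumed to exist and
-// the values are placeholders taken from the examples.
-//
-//	span.SetAttributes(
-//		CassandraConsistencyLevelLocalQuorum,
-//		CassandraCoordinatorDC("us-west-2"),
-//		CassandraPageSize(5000),
-//		CassandraQueryIdempotent(true),
-//	)
-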
-// Namespace: cicd
-const (
- // CICDPipelineNameKey is the attribute Key conforming to the
- // "cicd.pipeline.name" semantic conventions. It represents the human readable
- // name of the pipeline within a CI/CD system.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Build and Test", "Lint", "Deploy Go Project",
- // "deploy_to_environment"
- CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
-
- // CICDPipelineResultKey is the attribute Key conforming to the
- // "cicd.pipeline.result" semantic conventions. It represents the result of a
- // pipeline run.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "success", "failure", "timeout", "skipped"
- CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
-
- // CICDPipelineRunIDKey is the attribute Key conforming to the
- // "cicd.pipeline.run.id" semantic conventions. It represents the unique
- // identifier of a pipeline run within a CI/CD system.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "120912"
- CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
-
- // CICDPipelineRunStateKey is the attribute Key conforming to the
- // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline
- // run goes through these states during its lifecycle.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "pending", "executing", "finalizing"
- CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
-
- // CICDPipelineTaskNameKey is the attribute Key conforming to the
- // "cicd.pipeline.task.name" semantic conventions. It represents the human
- // readable name of a task within a pipeline. Task here most closely aligns with
- // a [computing process] in a pipeline. Other terms for tasks include commands,
- // steps, and procedures.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
- //
- // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
- CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
-
- // CICDPipelineTaskRunIDKey is the attribute Key conforming to the
- // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
- // identifier of a task run within a pipeline.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "12097"
- CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
-
- // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
- // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
- // [URL] of the pipeline run providing the complete address in order to locate
- // and identify the pipeline run.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"
- //
- // [URL]: https://wikipedia.org/wiki/URL
- CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full")
-
- // CICDPipelineTaskTypeKey is the attribute Key conforming to the
- // "cicd.pipeline.task.type" semantic conventions. It represents the type of the
- // task within a pipeline.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "build", "test", "deploy"
- CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type")
-
- // CICDSystemComponentKey is the attribute Key conforming to the
- // "cicd.system.component" semantic conventions. It represents the name of a
- // component of the CICD system.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "controller", "scheduler", "agent"
- CICDSystemComponentKey = attribute.Key("cicd.system.component")
-
- // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state"
- // semantic conventions. It represents the state of a CICD worker / agent.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "idle", "busy", "down"
- CICDWorkerStateKey = attribute.Key("cicd.worker.state")
-)
-
-// CICDPipelineName returns an attribute KeyValue conforming to the
-// "cicd.pipeline.name" semantic conventions. It represents the human readable
-// name of the pipeline within a CI/CD system.
-func CICDPipelineName(val string) attribute.KeyValue {
- return CICDPipelineNameKey.String(val)
-}
-
-// CICDPipelineRunID returns an attribute KeyValue conforming to the
-// "cicd.pipeline.run.id" semantic conventions. It represents the unique
-// identifier of a pipeline run within a CI/CD system.
-func CICDPipelineRunID(val string) attribute.KeyValue {
- return CICDPipelineRunIDKey.String(val)
-}
-
-// CICDPipelineTaskName returns an attribute KeyValue conforming to the
-// "cicd.pipeline.task.name" semantic conventions. It represents the human
-// readable name of a task within a pipeline. Task here most closely aligns with
-// a [computing process] in a pipeline. Other terms for tasks include commands,
-// steps, and procedures.
-//
-// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
-func CICDPipelineTaskName(val string) attribute.KeyValue {
- return CICDPipelineTaskNameKey.String(val)
-}
-
-// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
-// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
-// identifier of a task run within a pipeline.
-func CICDPipelineTaskRunID(val string) attribute.KeyValue {
- return CICDPipelineTaskRunIDKey.String(val)
-}
-
-// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
-// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
-// [URL] of the pipeline run providing the complete address in order to locate
-// and identify the pipeline run.
-//
-// [URL]: https://wikipedia.org/wiki/URL
-func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
- return CICDPipelineTaskRunURLFullKey.String(val)
-}
-
-// CICDSystemComponent returns an attribute KeyValue conforming to the
-// "cicd.system.component" semantic conventions. It represents the name of a
-// component of the CICD system.
-func CICDSystemComponent(val string) attribute.KeyValue {
- return CICDSystemComponentKey.String(val)
-}
-
-// Enum values for cicd.pipeline.result
-var (
- // The pipeline run finished successfully.
- // Stability: development
- CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
-	// The pipeline run did not finish successfully, e.g. due to a compile error or a
- // failing test. Such failures are usually detected by non-zero exit codes of
- // the tools executed in the pipeline run.
- // Stability: development
- CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
-	// The pipeline run failed due to an error in the CICD system, e.g. due to the
- // worker being killed.
- // Stability: development
- CICDPipelineResultError = CICDPipelineResultKey.String("error")
- // A timeout caused the pipeline run to be interrupted.
- // Stability: development
- CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
-	// The pipeline run was cancelled, e.g. by a user manually cancelling the
- // pipeline run.
- // Stability: development
- CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
-	// The pipeline run was skipped, e.g. due to a precondition not being met.
- // Stability: development
- CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
-)
-
-// Enum values for cicd.pipeline.run.state
-var (
- // The run pending state spans from the event triggering the pipeline run until
-	// the execution of the run starts (e.g. time spent in a queue, provisioning
- // agents, creating run resources).
- //
- // Stability: development
- CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
-	// The executing state spans the execution of any run tasks (e.g. build, test).
- // Stability: development
- CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
-	// The finalizing state spans from when the run has finished executing (e.g.
- // cleanup of run resources).
- // Stability: development
- CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
-)
-
-// Enum values for cicd.pipeline.task.type
-var (
- // build
- // Stability: development
- CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
- // test
- // Stability: development
- CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
- // deploy
- // Stability: development
- CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
-)
-
-// Enum values for cicd.worker.state
-var (
- // The worker is not performing work for the CICD system. It is available to the
- // CICD system to perform work on (online / idle).
- // Stability: development
- CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
- // The worker is performing work for the CICD system.
- // Stability: development
- CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
- // The worker is not available to the CICD system (disconnected / down).
- // Stability: development
- CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
-)
-
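-// A minimal sketch (illustrative, not generated code): a completed
-// pipeline-run span annotated with the CICD attributes and result enum above.
-// span is assumed to exist; the name and ID are placeholders from the
-// examples.
-//
-//	span.SetAttributes(
-//		CICDPipelineName("Build and Test"),
-//		CICDPipelineRunID("120912"),
-//		CICDPipelineResultSuccess,
-//	)
-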
-// Namespace: client
-const (
- // ClientAddressKey is the attribute Key conforming to the "client.address"
- // semantic conventions. It represents the client address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix domain
- // socket name.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Stable
- //
- // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
- // Note: When observed from the server side, and when communicating through an
- // intermediary, `client.address` SHOULD represent the client address behind any
- // intermediaries, for example proxies, if it's available.
- ClientAddressKey = attribute.Key("client.address")
-
- // ClientPortKey is the attribute Key conforming to the "client.port" semantic
- // conventions. It represents the client port number.
- //
- // Type: int
- // RequirementLevel: Recommended
- // Stability: Stable
- //
- // Examples: 65123
- // Note: When observed from the server side, and when communicating through an
- // intermediary, `client.port` SHOULD represent the client port behind any
- // intermediaries, for example proxies, if it's available.
- ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the "client.address"
-// semantic conventions. It represents the client address - domain name if
-// available without reverse DNS lookup; otherwise, IP address or Unix domain
-// socket name.
-func ClientAddress(val string) attribute.KeyValue {
- return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
- return ClientPortKey.Int(val)
-}
-
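-// A minimal sketch (assumption): a server-side span recording the observed
-// client endpoint with the constructors above; span is assumed to exist, and
-// the address and port are placeholders taken from the examples.
-//
-//	span.SetAttributes(
-//		ClientAddress("10.1.2.80"),
-//		ClientPort(65123),
-//	)
-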
-// Namespace: cloud
-const (
- // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
- // semantic conventions. It represents the cloud account ID the resource is
- // assigned to.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "111111111111", "opentelemetry"
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to increase
- // availability. Availability zone represents the zone where the resource is
- // running.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "us-east-1c"
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples:
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
- // conventions. It represents the geographical region the resource is running.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "us-central1", "us-east-1"
- // Note: Refer to your provider's docs to see the available regions, for example
- // [Alibaba Cloud regions], [AWS regions], [Azure regions],
- // [Google Cloud regions], or [Tencent Cloud regions].
- //
- // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
- // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
- // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
- // [Google Cloud regions]: https://cloud.google.com/about/locations
- // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
- // semantic conventions. It represents the cloud provider-specific native
- // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
- // [fully qualified resource ID] on Azure, a [full resource name] on GCP).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
- // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
- // "/subscriptions//resourceGroups/
- // /providers/Microsoft.Web/sites//functions/"
- // Note: On some cloud providers, it may not be possible to determine the full
- // ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud provider.
- // The following well-known definitions MUST be used if you set this attribute
- // and they apply:
- //
- // - **AWS Lambda:** The function [ARN].
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias suffix]
- // with the resolved function version, as the same runtime instance may be
- // invocable with
- // multiple different aliases.
- // - **GCP:** The [URI of the resource]
- // - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
- // *not* the function app, having the form
- //
-	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function app
- // can host multiple functions that would usually share
- // a TracerProvider.
- //
- //
- // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
- // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
- // [full resource name]: https://cloud.google.com/apis/design/resource_names#full_resource_name
- // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
- // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
- // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
-// semantic conventions. It represents the geographical region the resource is
-// running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
-// on GCP).
-//
-// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
-// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
-// [full resource name]: https://cloud.google.com/apis/design/resource_names#full_resource_name
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// Enum values for cloud.platform
-var (
- // Alibaba Cloud Elastic Compute Service
- // Stability: development
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- // Stability: development
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- // Stability: development
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- // Stability: development
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- // Stability: development
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- // Stability: development
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- // Stability: development
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- // Stability: development
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- // Stability: development
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- // Stability: development
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- // Stability: development
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Apps
- // Stability: development
- CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
- // Azure Container Instances
- // Stability: development
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- // Stability: development
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- // Stability: development
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- // Stability: development
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- // Stability: development
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Bare Metal Solution (BMS)
- // Stability: development
- CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
- // Google Cloud Compute Engine (GCE)
- // Stability: development
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- // Stability: development
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- // Stability: development
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- // Stability: development
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- // Stability: development
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- // Stability: development
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- // Stability: development
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Compute on Oracle Cloud Infrastructure (OCI)
- // Stability: development
- CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
- // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
- // Stability: development
- CloudPlatformOracleCloudOke = CloudPlatformKey.String("oracle_cloud_oke")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- // Stability: development
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- // Stability: development
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- // Stability: development
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// Enum values for cloud.provider
-var (
- // Alibaba Cloud
- // Stability: development
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- // Stability: development
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- // Stability: development
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- // Stability: development
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- // Stability: development
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- // Stability: development
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Oracle Cloud Infrastructure (OCI)
- // Stability: development
- CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
- // Tencent Cloud
- // Stability: development
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
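-// A minimal sketch (illustrative only): cloud attributes typically land on an
-// SDK resource. It combines the provider/platform enums with the string
-// constructors above, assuming the go.opentelemetry.io/otel/sdk/resource
-// package is imported as resource and SchemaURL is this package's schema
-// constant; values are placeholders from the examples.
-//
-//	res := resource.NewWithAttributes(
-//		SchemaURL,
-//		CloudProviderAWS,
-//		CloudPlatformAWSLambda,
-//		CloudRegion("us-east-1"),
-//		CloudAccountID("111111111111"),
-//	)
-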
-// Namespace: cloudevents
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the [event_id]
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
- //
- // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the [source]
- // identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
- // "my-service"
- //
- // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents specification] which the event uses.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: 1.0
- //
- // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the [subject]
- // of the event in the context of the event producer (identified by source).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: mynewfile.jpg
- //
- // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the [event_type]
- // contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
- //
- // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the [event_id]
-// uniquely identifies the event.
-//
-// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the [source]
-// identifies the context in which an event happened.
-//
-// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to the
-// "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents specification] which the event uses.
-//
-// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the [subject]
-// of the event in the context of the event producer (identified by source).
-//
-// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the [event_type]
-// contains a value describing the type of event related to the originating
-// occurrence.
-//
-// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
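-// A minimal sketch (assumption, not upstream code): annotating a span that
-// processes a CloudEvent with the constructors above; span is assumed to
-// exist, and the values mirror the examples.
-//
-//	span.SetAttributes(
-//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
-//		CloudeventsEventSource("https://github.com/cloudevents"),
-//		CloudeventsEventType("com.example.object.deleted.v2"),
-//	)
-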
-// Namespace: cloudfoundry
-const (
- // CloudfoundryAppIDKey is the attribute Key conforming to the
- // "cloudfoundry.app.id" semantic conventions. It represents the guid of the
- // application.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
- // Note: Application instrumentation should use the value from environment
- // variable `VCAP_APPLICATION.application_id`. This is the same value as
- // reported by `cf app --guid`.
- CloudfoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
-
- // CloudfoundryAppInstanceIDKey is the attribute Key conforming to the
- // "cloudfoundry.app.instance.id" semantic conventions. It represents the index
- // of the application instance. 0 when just one instance is active.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "0", "1"
-	// Note: CloudFoundry defines the `instance_id` in the
-	// [Loggregator v2 envelope].
- // It is used for logs and metrics emitted by CloudFoundry. It is
- // supposed to contain the application instance index for applications
- // deployed on the runtime.
- //
- // Application instrumentation should use the value from environment
- // variable `CF_INSTANCE_INDEX`.
- //
- // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
- CloudfoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
-
- // CloudfoundryAppNameKey is the attribute Key conforming to the
- // "cloudfoundry.app.name" semantic conventions. It represents the name of the
- // application.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "my-app-name"
- // Note: Application instrumentation should use the value from environment
- // variable `VCAP_APPLICATION.application_name`. This is the same value
- // as reported by `cf apps`.
- CloudfoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
-
- // CloudfoundryOrgIDKey is the attribute Key conforming to the
- // "cloudfoundry.org.id" semantic conventions. It represents the guid of the
- // CloudFoundry org the application is running in.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: Development
- //
- // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
- // Note: Application instrumentation should use the value from environment
- // variable `VCAP_APPLICATION.org_id`. This is the same value as
- // reported by `cf org