diff --git a/client/go.mod b/client/go.mod
index 6ffb3f0a..12fbe237 100644
--- a/client/go.mod
+++ b/client/go.mod
@@ -11,33 +11,34 @@ require (
connectrpc.com/connect v1.19.1
github.com/docker/secrets-engine/x v0.0.20-do.not.use
github.com/stretchr/testify v1.11.1
- google.golang.org/protobuf v1.36.9
+ google.golang.org/protobuf v1.36.11
)
require (
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/sdk v1.38.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.1 // indirect
- golang.org/x/mod v0.26.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/text v0.28.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/grpc v1.75.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/otel v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/trace v1.40.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.9.0 // indirect
+ golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/grpc v1.78.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/client/go.sum b/client/go.sum
index ce3386bf..870334a2 100644
--- a/client/go.sum
+++ b/client/go.sum
@@ -2,6 +2,8 @@ connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -15,58 +17,58 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
-golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
-google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
-google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
-google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/go.work.sum b/go.work.sum
index d79b2b53..4a304274 100644
--- a/go.work.sum
+++ b/go.work.sum
@@ -1,13 +1,21 @@
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
-connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
+codeberg.org/go-fonts/liberation v0.5.0/go.mod h1:zS/2e1354/mJ4pGzIIaEtm/59VFCFnYC7YV6YdGl5GU=
+codeberg.org/go-latex/latex v0.1.0/go.mod h1:LA0q/AyWIYrqVd+A9Upkgsb+IqPcmSTKc9Dny04MHMw=
+codeberg.org/go-pdf/fpdf v0.10.0/go.mod h1:Y0DGRAdZ0OmnZPvjbMp/1bYxmIPxm0ws4tfoPOc4LjU=
+filippo.io/nistec v0.0.4/go.mod h1:PK/lw8I1gQT4hUML4QGaqljwdDaFcMyFKSXN7kjrtKI=
+git.sr.ht/~sbinet/gg v0.6.0/go.mod h1:uucygbfC9wVPQIfrmwM2et0imr8L7KQWywX0xpFMm94=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Benehiko/go-keychain/v2 v2.0.0/go.mod h1:Qs/Ke0XjDSN07acDqQ89qXyE/mVGmIqB1qpsyu+Th0Q=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -16,10 +24,12 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/clipperhouse/uax29/v2 v2.2.0 h1:ChwIKnQN3kcZteTXMgb1wztSgaU+ZemkgWdohwgs8tY=
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
@@ -40,7 +50,9 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz
github.com/docker/secrets-engine v0.0.7/go.mod h1:Gs9tyhVJC28cd8XEqGf0MSV4Qni+k+T2YE2LPxBxs+Y=
github.com/docker/secrets-engine/mysecret v0.0.12/go.mod h1:5Q47SUB+HGV+CdFuXBMqA00mf5ZuvWwKh+hcHF1NxSA=
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs=
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -49,13 +61,16 @@ github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQ
github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
+github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198/go.mod h1:DTh/Y2+NbnOVVoypCCQrovMPDKUGp4yZpSbWg5D0XIM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -64,9 +79,12 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
+github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -79,6 +97,7 @@ github.com/knqyf263/go-plugin v0.9.0/go.mod h1:2z5lCO1/pez6qGo8CvCxSlBFSEat4MEp1
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -115,6 +134,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -124,7 +144,9 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -134,40 +156,96 @@ github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+C
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
+go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/image v0.25.0/go.mod h1:tCAmOEGthTtkalusGp1g3xa2gke8J6c2N565dTyl9Rs=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b/go.mod h1:4ZwOYna0/zsOKwuR5X/m0QFOJpSZvAxFfkQT+Erd9D4=
+golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE=
+golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
+golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
+golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/plot v0.15.2/go.mod h1:DX+x+DWso3LTha+AdkJEv5Txvi+Tql3KAGkehP0/Ubg=
+google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
+google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
diff --git a/plugin/go.mod b/plugin/go.mod
index 3ab71133..4f22454e 100644
--- a/plugin/go.mod
+++ b/plugin/go.mod
@@ -12,34 +12,35 @@ require (
github.com/containerd/nri v0.10.0
github.com/docker/secrets-engine/x v0.0.20-do.not.use
github.com/stretchr/testify v1.11.1
- google.golang.org/protobuf v1.36.9
+ google.golang.org/protobuf v1.36.11
)
require (
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/sdk v1.38.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.1 // indirect
- golang.org/x/mod v0.26.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/text v0.28.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/grpc v1.75.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/otel v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/trace v1.40.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.9.0 // indirect
+ golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/grpc v1.78.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/plugin/go.sum b/plugin/go.sum
index 5d83d2ee..d63cf439 100644
--- a/plugin/go.sum
+++ b/plugin/go.sum
@@ -2,6 +2,8 @@ connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/nri v0.10.0 h1:bt2NzfvlY6OJE0i+fB5WVeGQEycxY7iFVQpEbh7J3Go=
github.com/containerd/nri v0.10.0/go.mod h1:5VyvLa/4uL8FjyO8nis1UjbCutXDpngil17KvBSL6BU=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -17,8 +19,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -27,50 +29,50 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
-golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
-google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
-google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
-google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/plugins/credentialhelper/go.mod b/plugins/credentialhelper/go.mod
index b68d9034..fbf498de 100644
--- a/plugins/credentialhelper/go.mod
+++ b/plugins/credentialhelper/go.mod
@@ -17,30 +17,31 @@ require (
require (
connectrpc.com/connect v1.19.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/sdk v1.38.0 // indirect
- go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.1 // indirect
- golang.org/x/mod v0.26.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/text v0.28.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/grpc v1.75.0 // indirect
- google.golang.org/protobuf v1.36.9 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/otel v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/trace v1.40.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.9.0 // indirect
+ golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/grpc v1.78.0 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/plugins/credentialhelper/go.sum b/plugins/credentialhelper/go.sum
index 46866604..e79ce671 100644
--- a/plugins/credentialhelper/go.sum
+++ b/plugins/credentialhelper/go.sum
@@ -2,6 +2,8 @@ connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/nri v0.10.0 h1:bt2NzfvlY6OJE0i+fB5WVeGQEycxY7iFVQpEbh7J3Go=
github.com/containerd/nri v0.10.0/go.mod h1:5VyvLa/4uL8FjyO8nis1UjbCutXDpngil17KvBSL6BU=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -21,8 +23,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -31,50 +33,50 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
-golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
-google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
-google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
-google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/plugins/pass/go.mod b/plugins/pass/go.mod
index c324a96a..cab08064 100644
--- a/plugins/pass/go.mod
+++ b/plugins/pass/go.mod
@@ -12,41 +12,42 @@ require (
github.com/docker/secrets-engine/x v0.0.20-do.not.use
github.com/spf13/cobra v1.10.1
github.com/stretchr/testify v1.11.1
- go.opentelemetry.io/otel v1.38.0
- go.opentelemetry.io/otel/metric v1.38.0
- go.opentelemetry.io/otel/sdk/metric v1.38.0
- go.opentelemetry.io/otel/trace v1.38.0
- golang.org/x/term v0.34.0
+ go.opentelemetry.io/otel v1.40.0
+ go.opentelemetry.io/otel/metric v1.40.0
+ go.opentelemetry.io/otel/sdk/metric v1.40.0
+ go.opentelemetry.io/otel/trace v1.40.0
+ golang.org/x/term v0.39.0
)
require (
connectrpc.com/connect v1.19.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/danieljoos/wincred v1.2.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/hashicorp/yamux v0.1.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/spf13/pflag v1.0.9 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/sdk v1.38.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.1 // indirect
- golang.org/x/crypto v0.41.0 // indirect
- golang.org/x/mod v0.26.0 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/text v0.28.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/grpc v1.75.0 // indirect
- google.golang.org/protobuf v1.36.9 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.40.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.9.0 // indirect
+ golang.org/x/crypto v0.47.0 // indirect
+ golang.org/x/mod v0.31.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/sys v0.40.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/grpc v1.78.0 // indirect
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/plugins/pass/go.sum b/plugins/pass/go.sum
index 52db4890..439482df 100644
--- a/plugins/pass/go.sum
+++ b/plugins/pass/go.sum
@@ -2,6 +2,8 @@ connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/nri v0.10.0 h1:bt2NzfvlY6OJE0i+fB5WVeGQEycxY7iFVQpEbh7J3Go=
github.com/containerd/nri v0.10.0/go.mod h1:5VyvLa/4uL8FjyO8nis1UjbCutXDpngil17KvBSL6BU=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -24,8 +26,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -36,8 +38,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
@@ -47,50 +49,50 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
-golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
-golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
-google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
-google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
-google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/store/go.mod b/store/go.mod
index 9ad548a6..f99d9848 100644
--- a/store/go.mod
+++ b/store/go.mod
@@ -8,7 +8,7 @@ go 1.25
replace github.com/docker/secrets-engine/x => ../x
require (
- filippo.io/age v1.2.1
+ filippo.io/age v1.3.1
github.com/cenkalti/backoff/v5 v5.0.3
github.com/danieljoos/wincred v1.2.3
github.com/docker/secrets-engine/x v0.0.20-do.not.use
@@ -16,18 +16,18 @@ require (
github.com/google/uuid v1.6.0
github.com/spf13/cobra v1.10.1
github.com/stretchr/testify v1.11.1
- golang.org/x/crypto v0.41.0
- golang.org/x/sys v0.37.0
- golang.org/x/text v0.28.0
+ golang.org/x/crypto v0.47.0
+ golang.org/x/sys v0.40.0
+ golang.org/x/text v0.33.0
)
require (
filippo.io/edwards25519 v1.1.0 // indirect
+ filippo.io/hpke v0.4.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/spf13/pflag v1.0.9 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/store/go.sum b/store/go.sum
index d6dd3f74..df8f9b99 100644
--- a/store/go.sum
+++ b/store/go.sum
@@ -1,9 +1,11 @@
-c2sp.org/CCTV/age v0.0.0-20240306222714-3ec4d716e805 h1:u2qwJeEvnypw+OCPUHmoZE3IqwfuN5kgDfo5MLzpNM0=
-c2sp.org/CCTV/age v0.0.0-20240306222714-3ec4d716e805/go.mod h1:FomMrUJ2Lxt5jCLmZkG3FHa72zUprnhd3v/Z18Snm4w=
-filippo.io/age v1.2.1 h1:X0TZjehAZylOIj4DubWYU1vWQxv9bJpo+Uu2/LGhi1o=
-filippo.io/age v1.2.1/go.mod h1:JL9ew2lTN+Pyft4RiNGguFfOpewKwSHm5ayKD/A4004=
+c2sp.org/CCTV/age v0.0.0-20251208015420-e9274a7bdbfd h1:ZLsPO6WdZ5zatV4UfVpr7oAwLGRZ+sebTUruuM4Ra3M=
+c2sp.org/CCTV/age v0.0.0-20251208015420-e9274a7bdbfd/go.mod h1:SrHC2C7r5GkDk8R+NFVzYy/sdj0Ypg9htaPXQq5Cqeo=
+filippo.io/age v1.3.1 h1:hbzdQOJkuaMEpRCLSN1/C5DX74RPcNCk6oqhKMXmZi0=
+filippo.io/age v1.3.1/go.mod h1:EZorDTYUxt836i3zdori5IJX/v2Lj6kWFU0cfh6C0D4=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+filippo.io/hpke v0.4.0 h1:p575VVQ6ted4pL+it6M00V/f2qTZITO0zgmdKCkd5+A=
+filippo.io/hpke v0.4.0/go.mod h1:EmAN849/P3qdeK+PCMkDpDm83vRHM5cDipBJ8xbQLVY=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -29,8 +31,8 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
@@ -40,14 +42,14 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
+golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
diff --git a/vendor/filippo.io/age/AUTHORS b/vendor/filippo.io/age/AUTHORS
deleted file mode 100644
index 1fbcfc4c..00000000
--- a/vendor/filippo.io/age/AUTHORS
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is the official list of age authors for copyright purposes.
-# To be included, send a change adding the individual or company
-# who owns a contribution's copyright.
-
-Google LLC
-Filippo Valsorda
diff --git a/vendor/filippo.io/age/LICENSE b/vendor/filippo.io/age/LICENSE
index 3b50c989..0080870b 100644
--- a/vendor/filippo.io/age/LICENSE
+++ b/vendor/filippo.io/age/LICENSE
@@ -1,4 +1,6 @@
Copyright 2019 The age Authors
+Copyright 2019 Google LLC
+Copyright 2022 Filippo Valsorda
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/filippo.io/age/README.md b/vendor/filippo.io/age/README.md
index 3d69d5be..acfa797c 100644
--- a/vendor/filippo.io/age/README.md
+++ b/vendor/filippo.io/age/README.md
@@ -12,7 +12,7 @@
age is a simple, modern and secure file encryption tool, format, and Go library.
-It features small explicit keys, no config options, and UNIX-style composability.
+It features small explicit keys, post-quantum support, no config options, and UNIX-style composability.
```
$ age-keygen -o key.txt
@@ -21,17 +21,17 @@ $ tar cvz ~/data | age -r age1ql3z7hjy54pw3hyww5ayyfg7zqgvc7w3j2elw8zmrj2kg5sfn9
$ age --decrypt -i key.txt data.tar.gz.age > data.tar.gz
```
-📜 The format specification is at [age-encryption.org/v1](https://age-encryption.org/v1). age was designed by [@Benjojo12](https://twitter.com/Benjojo12) and [@FiloSottile](https://twitter.com/FiloSottile).
-
-📬 Follow the maintenance of this project by subscribing to [Maintainer Dispatches](https://filippo.io/newsletter)!
+📜 The format specification is at [age-encryption.org/v1](https://age-encryption.org/v1). age was designed by [@benjojo](https://github.com/benjojo) and [@FiloSottile](https://github.com/FiloSottile).
🦀 An alternative interoperable Rust implementation is available at [github.com/str4d/rage](https://github.com/str4d/rage).
+🌍 [Typage](https://github.com/FiloSottile/typage) is a TypeScript implementation. It works in the browser, Node.js, Deno, and Bun.
+
🔑 Hardware PIV tokens such as YubiKeys are supported through the [age-plugin-yubikey](https://github.com/str4d/age-plugin-yubikey) plugin.
✨ For more plugins, implementations, tools, and integrations, check out the [awesome age](https://github.com/FiloSottile/awesome-age) list.
-💬 The author pronounces it `[aɡe̞]` [with a hard *g*](https://translate.google.com/?sl=it&text=aghe), like GIF, and is always spelled lowercase.
+💬 The author pronounces it `[aɡe̞]` [with a hard *g*](https://translate.google.com/?sl=it&text=aghe), like GIF, and it's always spelled lowercase.
## Installation
@@ -48,6 +48,12 @@ $ age --decrypt -i key.txt data.tar.gz.age > data.tar.gz
port install age
+
+ | Windows |
+
+ winget install --id FiloSottile.age
+ |
+
| Alpine Linux v3.15+ |
@@ -85,6 +91,12 @@ $ age --decrypt -i key.txt data.tar.gz.age > data.tar.gz
emerge app-crypt/age
|
+
+ | Guix System |
+
+ guix package -i age
+ |
+
| NixOS / Nix |
@@ -133,22 +145,18 @@ $ age --decrypt -i key.txt data.tar.gz.age > data.tar.gz
scoop bucket add extras && scoop install age
|
-
- | pkgx |
-
- pkgx install age
- |
-
On Windows, Linux, macOS, and FreeBSD you can use the pre-built binaries.
```
https://dl.filippo.io/age/latest?for=linux/amd64
-https://dl.filippo.io/age/v1.1.1?for=darwin/arm64
+https://dl.filippo.io/age/v1.3.0?for=darwin/arm64
...
```
+If you download the pre-built binaries, you can check their [Sigsum proofs](./SIGSUM.md).
+
If your system has [a supported version of Go](https://go.dev/dl/), you can build from source.
```
@@ -157,39 +165,6 @@ go install filippo.io/age/cmd/...@latest
Help from new packagers is very welcome.
-### Verifying the release signatures
-
-If you download the pre-built binaries, you can check their
-[Sigsum](https://www.sigsum.org) proofs, which are like signatures with extra
-transparency: you can cryptographically verify that every proof is logged in a
-public append-only log, so you can hold the age project accountable for every
-binary release we ever produced. This is similar to what the [Go Checksum
-Database](https://go.dev/blog/module-mirror-launch) provides.
-
-```
-cat << EOF > age-sigsum-key.pub
-ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM1WpnEswJLPzvXJDiswowy48U+G+G1kmgwUE2eaRHZG
-ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAz2WM5CyPLqiNjk7CLl4roDXwKhQ0QExXLebukZEZFS
-EOF
-cat << EOF > sigsum-trust-policy.txt
-log 154f49976b59ff09a123675f58cb3e346e0455753c3c3b15d465dcb4f6512b0b https://poc.sigsum.org/jellyfish
-witness poc.sigsum.org/nisse 1c25f8a44c635457e2e391d1efbca7d4c2951a0aef06225a881e46b98962ac6c
-witness rgdd.se/poc-witness 28c92a5a3a054d317c86fc2eeb6a7ab2054d6217100d0be67ded5b74323c5806
-group demo-quorum-rule all poc.sigsum.org/nisse rgdd.se/poc-witness
-quorum demo-quorum-rule
-EOF
-
-curl -JLO "https://dl.filippo.io/age/v1.2.0?for=darwin/arm64"
-curl -JLO "https://dl.filippo.io/age/v1.2.0?for=darwin/arm64&proof"
-
-go install sigsum.org/sigsum-go/cmd/sigsum-verify@v0.8.0
-sigsum-verify -k age-sigsum-key.pub -p sigsum-trust-policy.txt \
- age-v1.2.0-darwin-arm64.tar.gz.proof < age-v1.2.0-darwin-arm64.tar.gz
-```
-
-You can learn more about what's happening above in the [Sigsum
-docs](https://www.sigsum.org/getting-started/).
-
## Usage
For the full documentation, read [the age(1) man page](https://filippo.io/age/age.1).
@@ -254,6 +229,28 @@ $ age -R recipients.txt example.jpg > example.jpg.age
If the argument to `-R` (or `-i`) is `-`, the file is read from standard input.
+### Post-quantum keys
+
+To generate hybrid post-quantum keys, which are secure against future quantum
+computer attacks, use the `-pq` flag with `age-keygen`. This may become the
+default in the future.
+
+Post-quantum identities start with `AGE-SECRET-KEY-PQ-1...` and recipients with
+`age1pq1...`. The recipients are unfortunately ~2000 characters long.
+
+```
+$ age-keygen -pq -o key.txt
+$ age-keygen -y key.txt > recipient.txt
+$ age -R recipient.txt example.jpg > example.jpg.age
+$ age -d -i key.txt example.jpg.age > example.jpg
+```
+
+Support for post-quantum keys is built into age v1.3.0 and later. Alternatively,
+the `age-plugin-pq` binary can be installed and placed in `$PATH` to add support
+to any version and implementation of age that supports plugins. Recipients will
+work out of the box, while identities will have to be converted to plugin
+identities with `age-plugin-pq -identity`.
+
### Passphrases
Files can be encrypted with a passphrase by using `-p/--passphrase`. By default age will automatically generate a secure passphrase. Passphrase protected files are automatically detected at decrypt time.
@@ -302,3 +299,28 @@ $ curl https://github.com/benjojo.keys | age -R - example.jpg > example.jpg.age
```
Keep in mind that people might not protect SSH keys long-term, since they are revokable when used only for authentication, and that SSH keys held on YubiKeys can't be used to decrypt files.
+
+### Inspecting encrypted files
+
+The `age-inspect` command can display metadata about an encrypted file without decrypting it, including the recipient types, whether it uses post-quantum encryption, and the payload size.
+
+```
+$ age-inspect secrets.age
+secrets.age is an age file, version "age-encryption.org/v1".
+
+This file is encrypted to the following recipient types:
+ - "mlkem768x25519"
+
+This file uses post-quantum encryption.
+
+Size breakdown (assuming it decrypts successfully):
+
+ Header 1627 bytes
+ Encryption overhead 32 bytes
+ Payload 42 bytes
+ -------------------
+ Total 1701 bytes
+
+```
+
+For scripting, use `--json` to get machine-readable output.
diff --git a/vendor/filippo.io/age/SIGSUM.md b/vendor/filippo.io/age/SIGSUM.md
new file mode 100644
index 00000000..d8e984bf
--- /dev/null
+++ b/vendor/filippo.io/age/SIGSUM.md
@@ -0,0 +1,23 @@
+If you download the pre-built binaries of version v1.2.0+, you can check their
+[Sigsum](https://www.sigsum.org) proofs, which are like signatures with extra
+transparency: you can cryptographically verify that every proof is logged in a
+public append-only log, so the age project can be held accountable for every
+binary release we ever produced. This is similar to what the [Go Checksum
+Database](https://go.dev/blog/module-mirror-launch) provides.
+
+```
+cat << EOF > age-sigsum-key.pub
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM1WpnEswJLPzvXJDiswowy48U+G+G1kmgwUE2eaRHZG
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAz2WM5CyPLqiNjk7CLl4roDXwKhQ0QExXLebukZEZFS
+EOF
+
+curl -JLO "https://dl.filippo.io/age/v1.3.0?for=darwin/arm64"
+curl -JLO "https://dl.filippo.io/age/v1.3.0?for=darwin/arm64&proof"
+
+go install sigsum.org/sigsum-go/cmd/sigsum-verify@v0.13.1
+sigsum-verify -k age-sigsum-key.pub -P sigsum-generic-2025-1 \
+ age-v1.3.0-darwin-arm64.tar.gz.proof < age-v1.3.0-darwin-arm64.tar.gz
+```
+
+You can learn more about what's happening above in the [Sigsum
+docs](https://www.sigsum.org/getting-started/).
diff --git a/vendor/filippo.io/age/age.go b/vendor/filippo.io/age/age.go
index c9b17bc8..13a2802c 100644
--- a/vendor/filippo.io/age/age.go
+++ b/vendor/filippo.io/age/age.go
@@ -5,10 +5,10 @@
// Package age implements file encryption according to the age-encryption.org/v1
// specification.
//
-// For most use cases, use the Encrypt and Decrypt functions with
-// X25519Recipient and X25519Identity. If passphrase encryption is required, use
-// ScryptRecipient and ScryptIdentity. For compatibility with existing SSH keys
-// use the filippo.io/age/agessh package.
+// For most use cases, use the [Encrypt] and [Decrypt] functions with
+// [HybridRecipient] and [HybridIdentity]. If passphrase encryption is
+// required, use [ScryptRecipient] and [ScryptIdentity]. For compatibility with
+// existing SSH keys use the filippo.io/age/agessh package.
//
// age encrypted files are binary and not malleable. For encoding them as text,
// use the filippo.io/age/armor package.
@@ -26,13 +26,13 @@
// There is no default path for age keys. Instead, they should be stored at
// application-specific paths. The CLI supports files where private keys are
// listed one per line, ignoring empty lines and lines starting with "#". These
-// files can be parsed with ParseIdentities.
+// files can be parsed with [ParseIdentities].
//
// When integrating age into a new system, it's recommended that you only
-// support X25519 keys, and not SSH keys. The latter are supported for manual
-// encryption operations. If you need to tie into existing key management
-// infrastructure, you might want to consider implementing your own Recipient
-// and Identity.
+// support native (X25519 and hybrid) keys, and not SSH keys. The latter are
+// supported for manual encryption operations. If you need to tie into existing
+// key management infrastructure, you might want to consider implementing your
+// own [Recipient] and [Identity].
//
// # Backwards compatibility
//
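As context for the doc-comment changes above, here is a minimal round-trip sketch of the [Encrypt]/[Decrypt] API. It deliberately sticks to the long-standing X25519 helpers, which remain supported for backwards compatibility; the new hybrid post-quantum constructors are not shown, since their exact names do not appear in this hunk.

```go
// Minimal Encrypt/Decrypt round trip using the classic X25519 API.
package main

import (
	"bytes"
	"io"
	"log"

	"filippo.io/age"
)

func main() {
	id, err := age.GenerateX25519Identity()
	if err != nil {
		log.Fatal(err)
	}

	var ciphertext bytes.Buffer
	w, err := age.Encrypt(&ciphertext, id.Recipient())
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.WriteString(w, "hello age"); err != nil {
		log.Fatal(err)
	}
	// Close is required: it flushes and authenticates the final chunk.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	r, err := age.Decrypt(&ciphertext, id)
	if err != nil {
		log.Fatal(err)
	}
	plaintext, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", plaintext)
}
```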
@@ -46,47 +46,47 @@
package age
import (
+ "bytes"
"crypto/hmac"
"crypto/rand"
"errors"
"fmt"
"io"
+ "slices"
"sort"
"filippo.io/age/internal/format"
"filippo.io/age/internal/stream"
)
-// An Identity is passed to Decrypt to unwrap an opaque file key from a
-// recipient stanza. It can be for example a secret key like X25519Identity, a
+// An Identity is passed to [Decrypt] to unwrap an opaque file key from a
+// recipient stanza. It can be for example a secret key like [HybridIdentity], a
// plugin, or a custom implementation.
-//
-// Unwrap must return an error wrapping ErrIncorrectIdentity if none of the
-// recipient stanzas match the identity, any other error will be considered
-// fatal.
-//
-// Most age API users won't need to interact with this directly, and should
-// instead pass Recipient implementations to Encrypt and Identity
-// implementations to Decrypt.
type Identity interface {
+ // Unwrap must return an error wrapping [ErrIncorrectIdentity] if none of
+ // the recipient stanzas match the identity, any other error will be
+ // considered fatal.
+ //
+ // Most age API users won't need to interact with this method directly, and
+ // should instead pass [Identity] implementations to [Decrypt].
Unwrap(stanzas []*Stanza) (fileKey []byte, err error)
}
+// ErrIncorrectIdentity is returned by [Identity.Unwrap] if none of the
+// recipient stanzas match the identity.
var ErrIncorrectIdentity = errors.New("incorrect identity for recipient block")
-// A Recipient is passed to Encrypt to wrap an opaque file key to one or more
-// recipient stanza(s). It can be for example a public key like X25519Recipient,
+// A Recipient is passed to [Encrypt] to wrap an opaque file key to one or more
+// recipient stanza(s). It can be for example a public key like [HybridRecipient],
// a plugin, or a custom implementation.
-//
-// Most age API users won't need to interact with this directly, and should
-// instead pass Recipient implementations to Encrypt and Identity
-// implementations to Decrypt.
type Recipient interface {
+ // Most age API users won't need to interact with this method directly, and
+ // should instead pass [Recipient] implementations to [Encrypt].
Wrap(fileKey []byte) ([]*Stanza, error)
}
-// RecipientWithLabels can be optionally implemented by a Recipient, in which
-// case Encrypt will use WrapWithLabels instead of Wrap.
+// RecipientWithLabels can be optionally implemented by a [Recipient], in which
+// case [Encrypt] will use WrapWithLabels instead of [Recipient.Wrap].
//
// Encrypt will succeed only if the labels returned by all the recipients
// (assuming the empty set for those that don't implement RecipientWithLabels)
@@ -103,9 +103,9 @@ type RecipientWithLabels interface {
// A Stanza is a section of the age header that encapsulates the file key as
// encrypted to a specific recipient.
//
-// Most age API users won't need to interact with this directly, and should
-// instead pass Recipient implementations to Encrypt and Identity
-// implementations to Decrypt.
+// Most age API users won't need to interact with this type directly, and should
+// instead pass [Recipient] implementations to [Encrypt] and [Identity]
+// implementations to [Decrypt].
type Stanza struct {
Type string
Args []string
@@ -115,35 +115,23 @@ type Stanza struct {
const fileKeySize = 16
const streamNonceSize = 16
-// Encrypt encrypts a file to one or more recipients.
-//
-// Writes to the returned WriteCloser are encrypted and written to dst as an age
-// file. Every recipient will be able to decrypt the file.
-//
-// The caller must call Close on the WriteCloser when done for the last chunk to
-// be encrypted and flushed to dst.
-func Encrypt(dst io.Writer, recipients ...Recipient) (io.WriteCloser, error) {
+func encryptHdr(fileKey []byte, recipients ...Recipient) (*format.Header, error) {
if len(recipients) == 0 {
return nil, errors.New("no recipients specified")
}
- fileKey := make([]byte, fileKeySize)
- if _, err := rand.Read(fileKey); err != nil {
- return nil, err
- }
-
hdr := &format.Header{}
var labels []string
for i, r := range recipients {
stanzas, l, err := wrapWithLabels(r, fileKey)
if err != nil {
- return nil, fmt.Errorf("failed to wrap key for recipient #%d: %v", i, err)
+ return nil, fmt.Errorf("failed to wrap key for recipient #%d: %w", i, err)
}
sort.Strings(l)
if i == 0 {
labels = l
} else if !slicesEqual(labels, l) {
- return nil, fmt.Errorf("incompatible recipients")
+ return nil, incompatibleLabelsError(labels, l)
}
for _, s := range stanzas {
hdr.Recipients = append(hdr.Recipients, (*format.Stanza)(s))
@@ -154,19 +142,62 @@ func Encrypt(dst io.Writer, recipients ...Recipient) (io.WriteCloser, error) {
} else {
hdr.MAC = mac
}
+ return hdr, nil
+}
+
+// Encrypt encrypts a file to one or more recipients. Every recipient will be
+// able to decrypt the file.
+//
+// Writes to the returned WriteCloser are encrypted and written to dst as an age
+// file. The caller must call Close on the WriteCloser when done for the last
+// chunk to be encrypted and flushed to dst.
+func Encrypt(dst io.Writer, recipients ...Recipient) (io.WriteCloser, error) {
+ fileKey := make([]byte, fileKeySize)
+ rand.Read(fileKey)
+
+ hdr, err := encryptHdr(fileKey, recipients...)
+ if err != nil {
+ return nil, err
+ }
if err := hdr.Marshal(dst); err != nil {
- return nil, fmt.Errorf("failed to write header: %v", err)
+ return nil, fmt.Errorf("failed to write header: %w", err)
}
nonce := make([]byte, streamNonceSize)
- if _, err := rand.Read(nonce); err != nil {
+ rand.Read(nonce)
+ if _, err := dst.Write(nonce); err != nil {
+ return nil, fmt.Errorf("failed to write nonce: %w", err)
+ }
+
+ return stream.NewEncryptWriter(streamKey(fileKey, nonce), dst)
+}
+
+// EncryptReader encrypts a file to one or more recipients. Every recipient will be
+// able to decrypt the file.
+//
+// Reads from the returned Reader produce the encrypted file, where the plaintext
+// is read from src.
+func EncryptReader(src io.Reader, recipients ...Recipient) (io.Reader, error) {
+ fileKey := make([]byte, fileKeySize)
+ rand.Read(fileKey)
+
+ hdr, err := encryptHdr(fileKey, recipients...)
+ if err != nil {
return nil, err
}
- if _, err := dst.Write(nonce); err != nil {
- return nil, fmt.Errorf("failed to write nonce: %v", err)
+ buf := &bytes.Buffer{}
+ if err := hdr.Marshal(buf); err != nil {
+ return nil, fmt.Errorf("failed to prepare header: %w", err)
}
- return stream.NewWriter(streamKey(fileKey, nonce), dst)
+ nonce := make([]byte, streamNonceSize)
+ rand.Read(nonce)
+
+ r, err := stream.NewEncryptReader(streamKey(fileKey, nonce), src)
+ if err != nil {
+ return nil, err
+ }
+ return io.MultiReader(buf, bytes.NewReader(nonce), r), nil
}
func wrapWithLabels(r Recipient, fileKey []byte) (s []*Stanza, labels []string, err error) {
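EncryptReader is the pull-based counterpart to Encrypt: instead of writing plaintext into a WriteCloser, the caller reads the finished age file out of a Reader. A hedged sketch of how it would be consumed, assuming only the signature shown above:

```go
package example

import (
	"io"

	"filippo.io/age"
)

// uploadEncrypted streams an age-encrypted copy of plaintext into dst using
// the pull-based EncryptReader added in this hunk (only its signature is
// assumed). Reads from the returned reader yield header, nonce, then payload.
func uploadEncrypted(dst io.Writer, plaintext io.Reader, recipient age.Recipient) error {
	r, err := age.EncryptReader(plaintext, recipient)
	if err != nil {
		return err
	}
	_, err = io.Copy(dst, r)
	return err
}
```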
@@ -189,39 +220,147 @@ func slicesEqual(s1, s2 []string) bool {
return true
}
-// NoIdentityMatchError is returned by Decrypt when none of the supplied
+func incompatibleLabelsError(l1, l2 []string) error {
+ hasPQ1 := slices.Contains(l1, "postquantum")
+ hasPQ2 := slices.Contains(l2, "postquantum")
+ if hasPQ1 != hasPQ2 {
+ return fmt.Errorf("incompatible recipients: can't mix post-quantum and classic recipients, or the file would be vulnerable to quantum computers")
+ }
+ return fmt.Errorf("incompatible recipients: %q and %q can't be mixed", l1, l2)
+}
+
+// NoIdentityMatchError is returned by [Decrypt] when none of the supplied
// identities match the encrypted file.
type NoIdentityMatchError struct {
// Errors is a slice of all the errors returned to Decrypt by the Unwrap
- // calls it made. They all wrap ErrIncorrectIdentity.
+ // calls it made. They all wrap [ErrIncorrectIdentity].
Errors []error
+ // StanzaTypes are the first argument of each recipient stanza in the
+ // encrypted file's header.
+ StanzaTypes []string
}
-func (*NoIdentityMatchError) Error() string {
+func (e *NoIdentityMatchError) Error() string {
+ if len(e.Errors) == 1 {
+ return "identity did not match any of the recipients: " + e.Errors[0].Error()
+ }
return "no identity matched any of the recipients"
}
+func (e *NoIdentityMatchError) Unwrap() []error {
+ return e.Errors
+}
+
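The StanzaTypes field and the new Unwrap method make identity-mismatch failures easier to diagnose. A hedged error-handling sketch, assuming only what this hunk declares:

```go
package example

import (
	"errors"
	"fmt"
	"io"

	"filippo.io/age"
)

// decryptWithDiagnostics wraps age.Decrypt and, on an identity mismatch,
// reports which recipient stanza types the file was encrypted to, using the
// StanzaTypes field added in this diff.
func decryptWithDiagnostics(src io.Reader, ids ...age.Identity) (io.Reader, error) {
	r, err := age.Decrypt(src, ids...)
	var noMatch *age.NoIdentityMatchError
	if errors.As(err, &noMatch) {
		return nil, fmt.Errorf("none of the %d identities matched; file is encrypted to stanza types %v: %w",
			len(ids), noMatch.StanzaTypes, err)
	}
	return r, err
}
```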
// Decrypt decrypts a file encrypted to one or more identities.
+// All identities will be tried until one successfully decrypts the file.
+// Native, non-interactive identities are tried before any other identities.
//
-// It returns a Reader reading the decrypted plaintext of the age file read
-// from src. All identities will be tried until one successfully decrypts the file.
+// Decrypt returns a Reader reading the decrypted plaintext of the age file read
+// from src. If no identity matches the encrypted file, the returned error will
+// be of type [NoIdentityMatchError].
func Decrypt(src io.Reader, identities ...Identity) (io.Reader, error) {
- if len(identities) == 0 {
- return nil, errors.New("no identities specified")
- }
-
hdr, payload, err := format.Parse(src)
if err != nil {
return nil, fmt.Errorf("failed to read header: %w", err)
}
+ fileKey, err := decryptHdr(hdr, identities...)
+ if err != nil {
+ return nil, err
+ }
+
+ nonce := make([]byte, streamNonceSize)
+ if _, err := io.ReadFull(payload, nonce); err != nil {
+ return nil, fmt.Errorf("failed to read nonce: %w", err)
+ }
+
+ return stream.NewDecryptReader(streamKey(fileKey, nonce), payload)
+}
+
+// DecryptReaderAt decrypts a file encrypted to one or more identities.
+// All identities will be tried until one successfully decrypts the file.
+// Native, non-interactive identities are tried before any other identities.
+//
+// DecryptReaderAt takes an underlying [io.ReaderAt] and its total encrypted
+// size, and returns a ReaderAt of the decrypted plaintext and the plaintext
+// size. These can be used for example to instantiate an [io.SectionReader],
+// which implements [io.Reader] and [io.Seeker], or for [zip.NewReader].
+// Note that ReaderAt by definition disregards the seek position of src.
+//
+// The ReadAt method of the returned ReaderAt can be called concurrently.
+// The ReaderAt will internally cache the most recently decrypted chunk.
+// DecryptReaderAt reads and decrypts the final chunk before returning,
+// to authenticate the plaintext size.
+//
+// If no identity matches the encrypted file, the returned error will be of
+// type [NoIdentityMatchError].
+func DecryptReaderAt(src io.ReaderAt, encryptedSize int64, identities ...Identity) (io.ReaderAt, int64, error) {
+ srcReader := io.NewSectionReader(src, 0, encryptedSize)
+ hdr, payload, err := format.Parse(srcReader)
+ if err != nil {
+ return nil, 0, fmt.Errorf("failed to read header: %w", err)
+ }
+ buf := &bytes.Buffer{}
+ if err := hdr.Marshal(buf); err != nil {
+ return nil, 0, fmt.Errorf("failed to serialize header: %w", err)
+ }
+
+ fileKey, err := decryptHdr(hdr, identities...)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ nonce := make([]byte, streamNonceSize)
+ if _, err := io.ReadFull(payload, nonce); err != nil {
+ return nil, 0, fmt.Errorf("failed to read nonce: %w", err)
+ }
+
+ payloadOffset := int64(buf.Len()) + int64(len(nonce))
+ payloadSize := encryptedSize - payloadOffset
+ plaintextSize, err := stream.PlaintextSize(payloadSize)
+ if err != nil {
+ return nil, 0, err
+ }
+ payloadReaderAt := io.NewSectionReader(src, payloadOffset, payloadSize)
+ r, err := stream.NewDecryptReaderAt(streamKey(fileKey, nonce), payloadReaderAt, payloadSize)
+ if err != nil {
+ return nil, 0, err
+ }
+ return r, plaintextSize, nil
+}
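Per its doc comment, DecryptReaderAt pairs naturally with io.SectionReader or zip.NewReader for random access. A sketch under that assumption, with the identity obtained elsewhere (e.g. from a parsed key file):

```go
package example

import (
	"archive/zip"
	"os"

	"filippo.io/age"
)

// openEncryptedZip opens an age-encrypted zip archive for random access via
// the DecryptReaderAt API introduced above (signature as shown in this hunk).
// The *os.File must remain open for as long as the returned zip.Reader is used.
func openEncryptedZip(path string, id age.Identity) (*zip.Reader, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	fi, err := f.Stat()
	if err != nil {
		return nil, err
	}
	ra, size, err := age.DecryptReaderAt(f, fi.Size(), id)
	if err != nil {
		return nil, err
	}
	return zip.NewReader(ra, size)
}
```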
+
+func decryptHdr(hdr *format.Header, identities ...Identity) ([]byte, error) {
+ if len(identities) == 0 {
+ return nil, errors.New("no identities specified")
+ }
+ slices.SortStableFunc(identities, func(a, b Identity) int {
+ var aIsNative, bIsNative bool
+ switch a.(type) {
+ case *X25519Identity, *HybridIdentity, *ScryptIdentity:
+ aIsNative = true
+ }
+ switch b.(type) {
+ case *X25519Identity, *HybridIdentity, *ScryptIdentity:
+ bIsNative = true
+ }
+ if aIsNative && !bIsNative {
+ return -1
+ }
+ if !aIsNative && bIsNative {
+ return 1
+ }
+ return 0
+ })
+
stanzas := make([]*Stanza, 0, len(hdr.Recipients))
+ errNoMatch := &NoIdentityMatchError{}
for _, s := range hdr.Recipients {
+ errNoMatch.StanzaTypes = append(errNoMatch.StanzaTypes, s.Type)
stanzas = append(stanzas, (*Stanza)(s))
}
- errNoMatch := &NoIdentityMatchError{}
var fileKey []byte
for _, id := range identities {
+ var err error
fileKey, err = id.Unwrap(stanzas)
if errors.Is(err, ErrIncorrectIdentity) {
errNoMatch.Errors = append(errNoMatch.Errors, err)
@@ -243,12 +382,7 @@ func Decrypt(src io.Reader, identities ...Identity) (io.Reader, error) {
return nil, errors.New("bad header MAC")
}
- nonce := make([]byte, streamNonceSize)
- if _, err := io.ReadFull(payload, nonce); err != nil {
- return nil, fmt.Errorf("failed to read nonce: %w", err)
- }
-
- return stream.NewReader(streamKey(fileKey, nonce), payload)
+ return fileKey, nil
}
// multiUnwrap is a helper that implements Identity.Unwrap in terms of a
@@ -269,3 +403,56 @@ func multiUnwrap(unwrap func(*Stanza) ([]byte, error), stanzas []*Stanza) ([]byt
}
return nil, ErrIncorrectIdentity
}
+
+// ExtractHeader returns a detached header from the src file.
+//
+// The detached header can be decrypted with [DecryptHeader] (for example on a
+// different system, without sharing the ciphertext) and then the file key can
+// be used with [NewInjectedFileKeyIdentity].
+//
+// This is a low-level function that most users won't need.
+func ExtractHeader(src io.Reader) ([]byte, error) {
+ hdr, _, err := format.Parse(src)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read header: %w", err)
+ }
+ buf := &bytes.Buffer{}
+ if err := hdr.Marshal(buf); err != nil {
+ return nil, fmt.Errorf("failed to serialize header: %w", err)
+ }
+ return buf.Bytes(), nil
+}
+
+// DecryptHeader decrypts a detached header and returns a file key.
+//
+// The detached header can be produced by [ExtractHeader], and the
+// returned file key can be used with [NewInjectedFileKeyIdentity].
+//
+// This is a low-level function that most users won't need.
+// It is the caller's responsibility to keep track of what file the
+// returned file key decrypts, and to ensure the file key is not used
+// for any other purpose.
+func DecryptHeader(header []byte, identities ...Identity) ([]byte, error) {
+ hdr, _, err := format.Parse(bytes.NewReader(header))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read header: %w", err)
+ }
+ return decryptHdr(hdr, identities...)
+}
+
+type injectedFileKeyIdentity struct {
+ fileKey []byte
+}
+
+// NewInjectedFileKeyIdentity returns an [Identity] that always produces
+// a fixed file key, allowing the use of a file key obtained out-of-band,
+// for example via [DecryptHeader].
+//
+// This is a low-level function that most users won't need.
+func NewInjectedFileKeyIdentity(fileKey []byte) Identity {
+ return injectedFileKeyIdentity{fileKey}
+}
+
+func (i injectedFileKeyIdentity) Unwrap(stanzas []*Stanza) (fileKey []byte, err error) {
+ return i.fileKey, nil
+}
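Together, ExtractHeader, DecryptHeader, and NewInjectedFileKeyIdentity enable a split flow where the identity holder only ever sees the small header and the ciphertext holder never sees the identity. A sketch using only the signatures added in this file:

```go
package example

import (
	"io"

	"filippo.io/age"
)

// decryptViaDetachedHeader shows the split flow enabled by the new
// ExtractHeader / DecryptHeader / NewInjectedFileKeyIdentity APIs (only the
// signatures in this diff are assumed). In practice, DecryptHeader would run
// on a separate system that holds the identity.
func decryptViaDetachedHeader(headerSrc, ciphertext io.Reader, id age.Identity) (io.Reader, error) {
	// Step 1: detach the header wherever the encrypted file is available.
	hdr, err := age.ExtractHeader(headerSrc)
	if err != nil {
		return nil, err
	}
	// Step 2: on the identity-holding side, recover the file key from the header.
	fileKey, err := age.DecryptHeader(hdr, id)
	if err != nil {
		return nil, err
	}
	// Step 3: back on the ciphertext side, decrypt with the injected file key.
	return age.Decrypt(ciphertext, age.NewInjectedFileKeyIdentity(fileKey))
}
```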
diff --git a/vendor/filippo.io/age/agessh/agessh.go b/vendor/filippo.io/age/agessh/agessh.go
index ec2ccdda..9af9a7c8 100644
--- a/vendor/filippo.io/age/agessh/agessh.go
+++ b/vendor/filippo.io/age/agessh/agessh.go
@@ -7,7 +7,7 @@
// encryption with age-encryption.org/v1.
//
// These recipient types should only be used for compatibility with existing
-// keys, and native X25519 keys should be preferred otherwise.
+// keys, and native keys should be preferred otherwise.
//
// Note that these recipient types are not anonymous: the encrypted message will
// include a short 32-bit ID of the public key.
diff --git a/vendor/filippo.io/age/internal/bech32/bech32.go b/vendor/filippo.io/age/internal/bech32/bech32.go
index 6f8a48cd..a5d83309 100644
--- a/vendor/filippo.io/age/internal/bech32/bech32.go
+++ b/vendor/filippo.io/age/internal/bech32/bech32.go
@@ -37,7 +37,7 @@ func polymod(values []byte) uint32 {
top := chk >> 25
chk = (chk & 0x1ffffff) << 5
chk = chk ^ uint32(v)
- for i := 0; i < 5; i++ {
+ for i := range 5 {
bit := top >> i & 1
if bit == 1 {
chk ^= generator[i]
diff --git a/vendor/filippo.io/age/internal/format/format.go b/vendor/filippo.io/age/internal/format/format.go
index 0f22e6fa..cc788c37 100644
--- a/vendor/filippo.io/age/internal/format/format.go
+++ b/vendor/filippo.io/age/internal/format/format.go
@@ -77,10 +77,7 @@ func (w *WrappedBase64Encoder) writeWrapped(p []byte) (int, error) {
panic("age: internal error: non-empty WrappedBase64Encoder.buf")
}
for len(p) > 0 {
- toWrite := ColumnsPerLine - (w.written % ColumnsPerLine)
- if toWrite > len(p) {
- toWrite = len(p)
- }
+ toWrite := min(ColumnsPerLine-(w.written%ColumnsPerLine), len(p))
n, _ := w.buf.Write(p[:toWrite])
w.written += n
p = p[n:]
@@ -228,7 +225,7 @@ func (e *ParseError) Unwrap() error {
return e.err
}
-func errorf(format string, a ...interface{}) error {
+func errorf(format string, a ...any) error {
return &ParseError{fmt.Errorf(format, a...)}
}
@@ -239,7 +236,9 @@ func Parse(input io.Reader) (*Header, io.Reader, error) {
rr := bufio.NewReader(input)
line, err := rr.ReadString('\n')
- if err != nil {
+ if err == io.EOF {
+ return nil, nil, errorf("file is empty")
+ } else if err != nil {
return nil, nil, errorf("failed to read intro: %w", err)
}
if line != intro {
diff --git a/vendor/filippo.io/age/internal/stream/stream.go b/vendor/filippo.io/age/internal/stream/stream.go
index 7551274b..967cdc21 100644
--- a/vendor/filippo.io/age/internal/stream/stream.go
+++ b/vendor/filippo.io/age/internal/stream/stream.go
@@ -6,17 +6,45 @@
package stream
import (
+ "bytes"
"crypto/cipher"
+ "encoding/binary"
"errors"
"fmt"
"io"
+ "sync/atomic"
"golang.org/x/crypto/chacha20poly1305"
)
const ChunkSize = 64 * 1024
-type Reader struct {
+func EncryptedChunkCount(encryptedSize int64) (int64, error) {
+ chunks := (encryptedSize + encChunkSize - 1) / encChunkSize
+
+ plaintextSize := encryptedSize - chunks*chacha20poly1305.Overhead
+ expChunks := (plaintextSize + ChunkSize - 1) / ChunkSize
+ // Empty plaintext, the only case that allows (and requires) an empty chunk.
+ if plaintextSize == 0 {
+ expChunks = 1
+ }
+ if expChunks != chunks {
+ return 0, fmt.Errorf("invalid encrypted payload size: %d", encryptedSize)
+ }
+
+ return chunks, nil
+}
+
+func PlaintextSize(encryptedSize int64) (int64, error) {
+ chunks, err := EncryptedChunkCount(encryptedSize)
+ if err != nil {
+ return 0, err
+ }
+ plaintextSize := encryptedSize - chunks*chacha20poly1305.Overhead
+ return plaintextSize, nil
+}
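A quick worked example of the size math above, using this file's constants (ChunkSize = 64 KiB = 65536 bytes, per-chunk overhead = 16 bytes, so encChunkSize = 65552): a 131073-byte plaintext spans three chunks (64 KiB + 64 KiB + 1 byte) and encrypts to 131073 + 3*16 = 131121 payload bytes. Going the other way, EncryptedChunkCount(131121) computes ceil(131121/65552) = 3 chunks, a candidate plaintext size of 131121 - 48 = 131073, and an expected chunk count of ceil(131073/65536) = 3, so the size is accepted and PlaintextSize returns 131073. The empty plaintext is the special case: it still carries one empty sealed chunk, so the smallest valid encrypted payload is exactly 16 bytes.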
+
+type DecryptReader struct {
a cipher.AEAD
src io.Reader
@@ -32,18 +60,15 @@ const (
lastChunkFlag = 0x01
)
-func NewReader(key []byte, src io.Reader) (*Reader, error) {
+func NewDecryptReader(key []byte, src io.Reader) (*DecryptReader, error) {
aead, err := chacha20poly1305.New(key)
if err != nil {
return nil, err
}
- return &Reader{
- a: aead,
- src: src,
- }, nil
+ return &DecryptReader{a: aead, src: src}, nil
}
-func (r *Reader) Read(p []byte) (int, error) {
+func (r *DecryptReader) Read(p []byte) (int, error) {
if len(r.unread) > 0 {
n := copy(p, r.unread)
r.unread = r.unread[n:]
@@ -85,7 +110,7 @@ func (r *Reader) Read(p []byte) (int, error) {
// readChunk reads the next chunk of ciphertext from r.src and makes it available
// in r.unread. last is true if the chunk was marked as the end of the message.
// readChunk must not be called again after returning a last chunk or an error.
-func (r *Reader) readChunk() (last bool, err error) {
+func (r *DecryptReader) readChunk() (last bool, err error) {
if len(r.unread) != 0 {
panic("stream: internal error: readChunk called with dirty buffer")
}
@@ -118,7 +143,7 @@ func (r *Reader) readChunk() (last bool, err error) {
out, err = r.a.Open(outBuf, r.nonce[:], in, nil)
}
if err != nil {
- return false, errors.New("failed to decrypt and authenticate payload chunk")
+ return false, errors.New("failed to decrypt and authenticate payload chunk, file may be corrupted or tampered with")
}
incNonce(&r.nonce)
@@ -130,12 +155,17 @@ func incNonce(nonce *[chacha20poly1305.NonceSize]byte) {
for i := len(nonce) - 2; i >= 0; i-- {
nonce[i]++
if nonce[i] != 0 {
- break
- } else if i == 0 {
- // The counter is 88 bits, this is unreachable.
- panic("stream: chunk counter wrapped around")
+ return
}
}
+ // The counter is 88 bits, this is unreachable.
+ panic("stream: chunk counter wrapped around")
+}
+
+func nonceForChunk(chunkIndex int64) *[chacha20poly1305.NonceSize]byte {
+ var nonce [chacha20poly1305.NonceSize]byte
+ binary.BigEndian.PutUint64(nonce[3:11], uint64(chunkIndex))
+ return &nonce
}
func setLastChunkFlag(nonce *[chacha20poly1305.NonceSize]byte) {
@@ -146,30 +176,23 @@ func nonceIsZero(nonce *[chacha20poly1305.NonceSize]byte) bool {
return *nonce == [chacha20poly1305.NonceSize]byte{}
}
-type Writer struct {
- a cipher.AEAD
- dst io.Writer
- unwritten []byte // backed by buf
- buf [encChunkSize]byte
- nonce [chacha20poly1305.NonceSize]byte
- err error
+type EncryptWriter struct {
+ a cipher.AEAD
+ dst io.Writer
+ buf bytes.Buffer
+ nonce [chacha20poly1305.NonceSize]byte
+ err error
}
-func NewWriter(key []byte, dst io.Writer) (*Writer, error) {
+func NewEncryptWriter(key []byte, dst io.Writer) (*EncryptWriter, error) {
aead, err := chacha20poly1305.New(key)
if err != nil {
return nil, err
}
- w := &Writer{
- a: aead,
- dst: dst,
- }
- w.unwritten = w.buf[:0]
- return w, nil
+ return &EncryptWriter{a: aead, dst: dst}, nil
}
-func (w *Writer) Write(p []byte) (n int, err error) {
- // TODO: consider refactoring with a bytes.Buffer.
+func (w *EncryptWriter) Write(p []byte) (n int, err error) {
if w.err != nil {
return 0, w.err
}
@@ -179,12 +202,13 @@ func (w *Writer) Write(p []byte) (n int, err error) {
total := len(p)
for len(p) > 0 {
- freeBuf := w.buf[len(w.unwritten):ChunkSize]
- n := copy(freeBuf, p)
+ n := min(len(p), ChunkSize-w.buf.Len())
+ w.buf.Write(p[:n])
p = p[n:]
- w.unwritten = w.unwritten[:len(w.unwritten)+n]
- if len(w.unwritten) == ChunkSize && len(p) > 0 {
+ // Only flush if there's a full chunk with bytes still to write, or we
+ // can't know if this is the last chunk yet.
+ if w.buf.Len() == ChunkSize && len(p) > 0 {
if err := w.flushChunk(notLastChunk); err != nil {
w.err = err
return 0, err
@@ -195,7 +219,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
}
// Close flushes the last chunk. It does not close the underlying Writer.
-func (w *Writer) Close() error {
+func (w *EncryptWriter) Close() error {
if w.err != nil {
return w.err
}
@@ -214,17 +238,214 @@ const (
notLastChunk = false
)
-func (w *Writer) flushChunk(last bool) error {
- if !last && len(w.unwritten) != ChunkSize {
+func (w *EncryptWriter) flushChunk(last bool) error {
+ if !last && w.buf.Len() != ChunkSize {
panic("stream: internal error: flush called with partial chunk")
}
if last {
setLastChunkFlag(&w.nonce)
}
- buf := w.a.Seal(w.buf[:0], w.nonce[:], w.unwritten, nil)
- _, err := w.dst.Write(buf)
- w.unwritten = w.buf[:0]
+ w.buf.Grow(chacha20poly1305.Overhead)
+ ciphertext := w.a.Seal(w.buf.Bytes()[:0], w.nonce[:], w.buf.Bytes(), nil)
+ _, err := w.dst.Write(ciphertext)
incNonce(&w.nonce)
+ w.buf.Reset()
return err
}
+
+type EncryptReader struct {
+ a cipher.AEAD
+ src io.Reader
+
+ // The first ready bytes of buf are already encrypted. This may be less than
+ // buf.Len(), because we need to over-read to know if a chunk is the last.
+ ready int
+ buf bytes.Buffer
+
+ nonce [chacha20poly1305.NonceSize]byte
+ err error
+}
+
+func NewEncryptReader(key []byte, src io.Reader) (*EncryptReader, error) {
+ aead, err := chacha20poly1305.New(key)
+ if err != nil {
+ return nil, err
+ }
+ return &EncryptReader{a: aead, src: src}, nil
+}
+
+func (r *EncryptReader) Read(p []byte) (int, error) {
+ if r.ready > 0 {
+ n, err := r.buf.Read(p[:min(len(p), r.ready)])
+ r.ready -= n
+ return n, err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ if err := r.feedBuffer(); err != nil {
+ r.err = err
+ return 0, err
+ }
+
+ n, err := r.buf.Read(p[:min(len(p), r.ready)])
+ r.ready -= n
+ return n, err
+}
+
+// feedBuffer reads and encrypts the next chunk from r.src and appends it to
+// r.buf. It sets r.ready to the number of newly available bytes in r.buf.
+func (r *EncryptReader) feedBuffer() error {
+ if r.ready > 0 {
+ panic("stream: internal error: feedBuffer called with dirty buffer")
+ }
+
+	// CopyN will use r.buf.ReadFrom to fill the buffer directly.
+ // We need ChunkSize + 1 bytes to determine if this is the last chunk.
+ _, err := io.CopyN(&r.buf, r.src, int64(ChunkSize-r.buf.Len()+1))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ if last := r.buf.Len() <= ChunkSize; last {
+ setLastChunkFlag(&r.nonce)
+
+ // After Grow, we know r.buf.Bytes() has enough capacity for the
+ // overhead. We encrypt in place and then do a Write to include the
+ // overhead in the buffer.
+ r.buf.Grow(chacha20poly1305.Overhead)
+ plaintext := r.buf.Bytes()
+ r.a.Seal(plaintext[:0], r.nonce[:], plaintext, nil)
+ incNonce(&r.nonce)
+ r.buf.Write(plaintext[len(plaintext) : len(plaintext)+chacha20poly1305.Overhead])
+ r.ready = r.buf.Len()
+
+ r.err = io.EOF
+ return nil
+ }
+
+	// Same as above, but accounting for the tail byte, which remains
+	// unencrypted and needs to be shifted past the overhead.
+ if r.buf.Len() != ChunkSize+1 {
+ panic("stream: internal error: unexpected buffer length")
+ }
+ tailByte := r.buf.Bytes()[ChunkSize]
+ r.buf.Grow(chacha20poly1305.Overhead)
+ plaintext := r.buf.Bytes()[:ChunkSize]
+ r.a.Seal(plaintext[:0], r.nonce[:], plaintext, nil)
+ incNonce(&r.nonce)
+ r.buf.Write(plaintext[len(plaintext)+1 : len(plaintext)+chacha20poly1305.Overhead])
+ r.buf.WriteByte(tailByte)
+ r.ready = ChunkSize + chacha20poly1305.Overhead
+
+ return nil
+}
+
+type DecryptReaderAt struct {
+ a cipher.AEAD
+ src io.ReaderAt
+ size int64
+ chunks int64
+ cache atomic.Pointer[cachedChunk]
+}
+
+type cachedChunk struct {
+ off int64
+ data []byte
+}
+
+func NewDecryptReaderAt(key []byte, src io.ReaderAt, size int64) (*DecryptReaderAt, error) {
+ aead, err := chacha20poly1305.New(key)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check that size is valid by decrypting the final chunk.
+ chunks, err := EncryptedChunkCount(size)
+ if err != nil {
+ return nil, err
+ }
+ finalChunkIndex := chunks - 1
+ finalChunkOff := finalChunkIndex * encChunkSize
+ finalChunkSize := size - finalChunkOff
+ finalChunk := make([]byte, finalChunkSize)
+ if _, err := src.ReadAt(finalChunk, finalChunkOff); err != nil {
+ return nil, fmt.Errorf("failed to read final chunk: %w", err)
+ }
+ nonce := nonceForChunk(finalChunkIndex)
+ setLastChunkFlag(nonce)
+ plaintext, err := aead.Open(finalChunk[:0], nonce[:], finalChunk, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decrypt and authenticate final chunk: %w", err)
+ }
+ cache := &cachedChunk{off: finalChunkOff, data: plaintext}
+
+ plaintextSize := size - chunks*chacha20poly1305.Overhead
+ r := &DecryptReaderAt{a: aead, src: src, size: plaintextSize, chunks: chunks}
+ r.cache.Store(cache)
+ return r, nil
+}
+
+func (r *DecryptReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+ if off < 0 || off > r.size {
+ return 0, fmt.Errorf("offset out of range [0:%d]: %d", r.size, off)
+ }
+ if len(p) == 0 {
+ return 0, nil
+ }
+ var cacheUpdate *cachedChunk
+ chunk := make([]byte, encChunkSize)
+ for len(p) > 0 && off < r.size {
+ chunkIndex := off / ChunkSize
+ chunkOff := chunkIndex * encChunkSize
+ encSize := r.size + r.chunks*chacha20poly1305.Overhead
+ chunkSize := min(encSize-chunkOff, encChunkSize)
+
+ cached := r.cache.Load()
+ var plaintext []byte
+ if cached != nil && cached.off == chunkOff {
+ plaintext = cached.data
+ cacheUpdate = nil
+ } else {
+ nn, err := r.src.ReadAt(chunk[:chunkSize], chunkOff)
+ if err == io.EOF {
+ if int64(nn) != chunkSize {
+ err = io.ErrUnexpectedEOF
+ } else {
+ err = nil
+ }
+ }
+ if err != nil {
+ return n, fmt.Errorf("failed to read chunk at offset %d: %w", chunkOff, err)
+ }
+ nonce := nonceForChunk(chunkIndex)
+ if chunkIndex == r.chunks-1 {
+ setLastChunkFlag(nonce)
+ }
+ plaintext, err = r.a.Open(chunk[:0], nonce[:], chunk[:chunkSize], nil)
+ if err != nil {
+ return n, fmt.Errorf("failed to decrypt and authenticate chunk at offset %d: %w", chunkOff, err)
+ }
+ cacheUpdate = &cachedChunk{off: chunkOff, data: plaintext}
+ }
+
+ plainChunkOff := int(off - chunkIndex*ChunkSize)
+ copySize := min(len(plaintext)-plainChunkOff, len(p))
+ copy(p, plaintext[plainChunkOff:plainChunkOff+copySize])
+ p = p[copySize:]
+ off += int64(copySize)
+ n += copySize
+ }
+ if cacheUpdate != nil {
+ r.cache.Store(cacheUpdate)
+ }
+ if off == r.size {
+ return n, io.EOF
+ }
+ return n, nil
+}
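To make the renamed stream API concrete, here is a minimal usage sketch: it encrypts a payload with `EncryptWriter` and then reads part of it back through the new random-access `DecryptReaderAt`. The import path is hypothetical, and error handling is kept terse.

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"

	"example.com/client/internal/stream" // hypothetical import path for the package above
)

func main() {
	key := make([]byte, 32) // chacha20poly1305.KeySize
	rand.Read(key)

	// Encrypt a small payload; Close flushes the final (short) chunk.
	var ciphertext bytes.Buffer
	w, err := stream.NewEncryptWriter(key, &ciphertext)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, stream"))
	w.Close()

	// Random-access decryption: read 6 plaintext bytes starting at offset 7.
	r, err := stream.NewDecryptReaderAt(key, bytes.NewReader(ciphertext.Bytes()), int64(ciphertext.Len()))
	if err != nil {
		panic(err)
	}
	buf := make([]byte, 6)
	if _, err := r.ReadAt(buf, 7); err != nil && err != io.EOF {
		panic(err)
	}
	fmt.Printf("%s\n", buf) // "stream"
}
```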
diff --git a/vendor/filippo.io/age/parse.go b/vendor/filippo.io/age/parse.go
index 373d1a89..99c15d42 100644
--- a/vendor/filippo.io/age/parse.go
+++ b/vendor/filippo.io/age/parse.go
@@ -9,6 +9,7 @@ import (
"fmt"
"io"
"strings"
+ "unicode/utf8"
)
// ParseIdentities parses a file with one or more private key encodings, one per
@@ -16,10 +17,10 @@ import (
//
// This is the same syntax as the private key files accepted by the CLI, except
// the CLI also accepts SSH private keys, which are not recommended for the
-// average application.
+// average application, and plugins, which involve invoking external programs.
//
-// Currently, all returned values are of type *X25519Identity, but different
-// types might be returned in the future.
+// Currently, all returned values are of type *[X25519Identity] or
+// *[HybridIdentity], but different types might be returned in the future.
func ParseIdentities(f io.Reader) ([]Identity, error) {
const privateKeySizeLimit = 1 << 24 // 16 MiB
var ids []Identity
@@ -31,30 +32,45 @@ func ParseIdentities(f io.Reader) ([]Identity, error) {
if strings.HasPrefix(line, "#") || line == "" {
continue
}
- i, err := ParseX25519Identity(line)
+ if !utf8.ValidString(line) {
+ return nil, fmt.Errorf("identities file is not valid UTF-8")
+ }
+ i, err := parseIdentity(line)
if err != nil {
return nil, fmt.Errorf("error at line %d: %v", n, err)
}
ids = append(ids, i)
}
if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("failed to read secret keys file: %v", err)
+ return nil, fmt.Errorf("failed to read identities file: %v", err)
}
if len(ids) == 0 {
- return nil, fmt.Errorf("no secret keys found")
+ return nil, fmt.Errorf("no identities found")
}
return ids, nil
}
+func parseIdentity(arg string) (Identity, error) {
+ switch {
+ case strings.HasPrefix(arg, "AGE-SECRET-KEY-1"):
+ return ParseX25519Identity(arg)
+ case strings.HasPrefix(arg, "AGE-SECRET-KEY-PQ-1"):
+ return ParseHybridIdentity(arg)
+ default:
+ return nil, fmt.Errorf("unknown identity type: %q", arg)
+ }
+}
+
// ParseRecipients parses a file with one or more public key encodings, one per
// line. Empty lines and lines starting with "#" are ignored.
//
// This is the same syntax as the recipients files accepted by the CLI, except
// the CLI also accepts SSH recipients, which are not recommended for the
-// average application.
+// average application, tagged recipients, which have different privacy
+// properties, and plugins, which involve invoking external programs.
//
-// Currently, all returned values are of type *X25519Recipient, but different
-// types might be returned in the future.
+// Currently, all returned values are of type *[X25519Recipient] or
+// *[HybridRecipient], but different types might be returned in the future.
func ParseRecipients(f io.Reader) ([]Recipient, error) {
const recipientFileSizeLimit = 1 << 24 // 16 MiB
var recs []Recipient
@@ -66,11 +82,12 @@ func ParseRecipients(f io.Reader) ([]Recipient, error) {
if strings.HasPrefix(line, "#") || line == "" {
continue
}
- r, err := ParseX25519Recipient(line)
+ if !utf8.ValidString(line) {
+ return nil, fmt.Errorf("recipients file is not valid UTF-8")
+ }
+ r, err := parseRecipient(line)
if err != nil {
- // Hide the error since it might unintentionally leak the contents
- // of confidential files.
- return nil, fmt.Errorf("malformed recipient at line %d", n)
+ return nil, fmt.Errorf("error at line %d: %v", n, err)
}
recs = append(recs, r)
}
@@ -82,3 +99,14 @@ func ParseRecipients(f io.Reader) ([]Recipient, error) {
}
return recs, nil
}
+
+func parseRecipient(arg string) (Recipient, error) {
+ switch {
+ case strings.HasPrefix(arg, "age1pq1"):
+ return ParseHybridRecipient(arg)
+ case strings.HasPrefix(arg, "age1"):
+ return ParseX25519Recipient(arg)
+ default:
+ return nil, fmt.Errorf("unknown recipient type: %q", arg)
+ }
+}
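As a usage sketch of the updated parsers, the following loads a recipients file that may now mix X25519 and hybrid post-quantum lines. The file path is hypothetical; only the `age.ParseRecipients` call is taken from the code above.

```go
package main

import (
	"log"
	"os"

	"filippo.io/age"
)

func main() {
	f, err := os.Open("recipients.txt") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Accepts both "age1..." (X25519) and "age1pq1..." (hybrid) lines.
	recipients, err := age.ParseRecipients(f)
	if err != nil {
		log.Fatal(err) // e.g. "error at line 3: unknown recipient type: ..."
	}
	log.Printf("loaded %d recipients", len(recipients))
}
```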
diff --git a/vendor/filippo.io/age/pq.go b/vendor/filippo.io/age/pq.go
new file mode 100644
index 00000000..46a5b067
--- /dev/null
+++ b/vendor/filippo.io/age/pq.go
@@ -0,0 +1,181 @@
+// Copyright 2025 The age Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package age
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "filippo.io/age/internal/bech32"
+ "filippo.io/age/internal/format"
+ "filippo.io/hpke"
+ "golang.org/x/crypto/chacha20poly1305"
+)
+
+const pqLabel = "age-encryption.org/mlkem768x25519"
+
+// HybridRecipient is the standard age public key. Messages encrypted to
+// this recipient can be decrypted with the corresponding [HybridIdentity].
+//
+// This recipient is safe against future cryptographically-relevant quantum
+// computers, and can only be used along with other post-quantum recipients.
+//
+// This recipient is anonymous, in the sense that an attacker can't tell from
+// the message alone if it is encrypted to a certain recipient.
+type HybridRecipient struct {
+ pk hpke.PublicKey
+}
+
+var _ Recipient = &HybridRecipient{}
+
+// newHybridRecipient returns a new [HybridRecipient] from a raw HPKE public key.
+func newHybridRecipient(publicKey []byte) (*HybridRecipient, error) {
+ pk, err := hpke.MLKEM768X25519().NewPublicKey(publicKey)
+ if err != nil {
+ return nil, errors.New("invalid MLKEM768-X25519 public key")
+ }
+ return &HybridRecipient{pk: pk}, nil
+}
+
+// ParseHybridRecipient returns a new [HybridRecipient] from a Bech32 public key
+// encoding with the "age1pq1" prefix.
+func ParseHybridRecipient(s string) (*HybridRecipient, error) {
+ t, k, err := bech32.Decode(s)
+ if err != nil {
+ return nil, fmt.Errorf("malformed recipient %q: %v", s, err)
+ }
+ if t != "age1pq" {
+ return nil, fmt.Errorf("malformed recipient %q: invalid type %q", s, t)
+ }
+ r, err := newHybridRecipient(k)
+ if err != nil {
+ return nil, fmt.Errorf("malformed recipient %q: %v", s, err)
+ }
+ return r, nil
+}
+
+func (r *HybridRecipient) Wrap(fileKey []byte) ([]*Stanza, error) {
+ s, _, err := r.WrapWithLabels(fileKey)
+ return s, err
+}
+
+// WrapWithLabels implements [RecipientWithLabels], returning a single
+// "postquantum" label. This ensures a HybridRecipient can't be mixed with other
+// recipients that would defeat its post-quantum security.
+//
+// To unsafely bypass this restriction, wrap HybridRecipient in a [Recipient]
+// type that doesn't expose WrapWithLabels.
+func (r *HybridRecipient) WrapWithLabels(fileKey []byte) ([]*Stanza, []string, error) {
+ enc, s, err := hpke.NewSender(r.pk, hpke.HKDFSHA256(), hpke.ChaCha20Poly1305(), []byte(pqLabel))
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to set up HPKE sender: %v", err)
+ }
+ ct, err := s.Seal(nil, fileKey)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to encrypt file key: %v", err)
+ }
+
+ l := &Stanza{
+ Type: "mlkem768x25519",
+ Args: []string{format.EncodeToString(enc)},
+ Body: ct,
+ }
+
+ return []*Stanza{l}, []string{"postquantum"}, nil
+}
+
+// String returns the Bech32 public key encoding of r.
+func (r *HybridRecipient) String() string {
+ s, _ := bech32.Encode("age1pq", r.pk.Bytes())
+ return s
+}
+
+// HybridIdentity is the standard age private key, which can decrypt messages
+// encrypted to the corresponding [HybridRecipient].
+type HybridIdentity struct {
+ k hpke.PrivateKey
+}
+
+var _ Identity = &HybridIdentity{}
+
+// newHybridIdentity returns a new [HybridIdentity] from a raw HPKE private key.
+func newHybridIdentity(secretKey []byte) (*HybridIdentity, error) {
+ k, err := hpke.MLKEM768X25519().NewPrivateKey(secretKey)
+ if err != nil {
+ return nil, errors.New("invalid MLKEM768-X25519 secret key")
+ }
+ return &HybridIdentity{k: k}, nil
+}
+
+// GenerateHybridIdentity randomly generates a new [HybridIdentity].
+func GenerateHybridIdentity() (*HybridIdentity, error) {
+ k, err := hpke.MLKEM768X25519().GenerateKey()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate post-quantum identity: %v", err)
+ }
+ return &HybridIdentity{k: k}, nil
+}
+
+// ParseHybridIdentity returns a new [HybridIdentity] from a Bech32 private key
+// encoding with the "AGE-SECRET-KEY-PQ-1" prefix.
+func ParseHybridIdentity(s string) (*HybridIdentity, error) {
+ t, k, err := bech32.Decode(s)
+ if err != nil {
+ return nil, fmt.Errorf("malformed secret key: %v", err)
+ }
+ if t != "AGE-SECRET-KEY-PQ-" {
+ return nil, fmt.Errorf("malformed secret key: unknown type %q", t)
+ }
+ r, err := newHybridIdentity(k)
+ if err != nil {
+ return nil, fmt.Errorf("malformed secret key: %v", err)
+ }
+ return r, nil
+}
+
+func (i *HybridIdentity) Unwrap(stanzas []*Stanza) ([]byte, error) {
+ return multiUnwrap(i.unwrap, stanzas)
+}
+
+func (i *HybridIdentity) unwrap(block *Stanza) ([]byte, error) {
+ if block.Type != "mlkem768x25519" {
+ return nil, ErrIncorrectIdentity
+ }
+ if len(block.Args) != 1 {
+ return nil, errors.New("invalid mlkem768x25519 recipient block")
+ }
+ enc, err := format.DecodeString(block.Args[0])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse mlkem768x25519 recipient: %v", err)
+ }
+ if len(block.Body) != fileKeySize+chacha20poly1305.Overhead {
+ return nil, errIncorrectCiphertextSize
+ }
+
+ r, err := hpke.NewRecipient(enc, i.k, hpke.HKDFSHA256(), hpke.ChaCha20Poly1305(), []byte(pqLabel))
+ if err != nil {
+ // MLKEM768-X25519 does implicit rejection, so a mismatched key does not
+ // hit this error path, but is only detected later when trying to open.
+ return nil, fmt.Errorf("invalid mlkem768x25519 recipient: %v", err)
+ }
+ fileKey, err := r.Open(nil, block.Body)
+ if err != nil {
+ return nil, ErrIncorrectIdentity
+ }
+ return fileKey, nil
+}
+
+// Recipient returns the public [HybridRecipient] value corresponding to i.
+func (i *HybridIdentity) Recipient() *HybridRecipient {
+ return &HybridRecipient{pk: i.k.PublicKey()}
+}
+
+// String returns the Bech32 private key encoding of i.
+func (i *HybridIdentity) String() string {
+ b, _ := i.k.Bytes()
+ s, _ := bech32.Encode("AGE-SECRET-KEY-PQ-", b)
+ return strings.ToUpper(s)
+}
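A minimal round-trip sketch with the new hybrid key type, using the existing `age.Encrypt` and `age.Decrypt` APIs; it assumes this vendored version of the library, since `HybridIdentity` is new.

```go
package main

import (
	"bytes"
	"io"
	"log"

	"filippo.io/age"
)

func main() {
	id, err := age.GenerateHybridIdentity()
	if err != nil {
		log.Fatal(err)
	}

	var out bytes.Buffer
	w, err := age.Encrypt(&out, id.Recipient())
	if err != nil {
		log.Fatal(err)
	}
	io.WriteString(w, "post-quantum hello")
	w.Close()

	r, err := age.Decrypt(&out, id)
	if err != nil {
		log.Fatal(err)
	}
	plaintext, _ := io.ReadAll(r)
	log.Printf("%s", plaintext)
}
```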
diff --git a/vendor/filippo.io/age/scrypt.go b/vendor/filippo.io/age/scrypt.go
index 73d13b7f..46bbc219 100644
--- a/vendor/filippo.io/age/scrypt.go
+++ b/vendor/filippo.io/age/scrypt.go
@@ -27,7 +27,7 @@ const scryptLabel = "age-encryption.org/v1/scrypt"
// for the same file.
//
// Its use is not recommended for automated systems, which should prefer
-// X25519Recipient.
+// [HybridRecipient] or [X25519Recipient].
type ScryptRecipient struct {
password []byte
workFactor int
@@ -150,14 +150,20 @@ func (i *ScryptIdentity) Unwrap(stanzas []*Stanza) ([]byte, error) {
return nil, errors.New("an scrypt recipient must be the only one")
}
}
- return multiUnwrap(i.unwrap, stanzas)
+ for _, s := range stanzas {
+ if s.Type != "scrypt" {
+ continue
+ }
+ return i.unwrap(s)
+ }
+ return nil, fmt.Errorf("%w: file is not passphrase-encrypted", ErrIncorrectIdentity)
}
var digitsRe = regexp.MustCompile(`^[1-9][0-9]*$`)
func (i *ScryptIdentity) unwrap(block *Stanza) ([]byte, error) {
if block.Type != "scrypt" {
- return nil, ErrIncorrectIdentity
+ return nil, errors.New("internal error: unwrap called on non-scrypt stanza")
}
if len(block.Args) != 2 {
return nil, errors.New("invalid scrypt recipient block")
@@ -200,7 +206,9 @@ func (i *ScryptIdentity) unwrap(block *Stanza) ([]byte, error) {
if err == errIncorrectCiphertextSize {
return nil, errors.New("invalid scrypt recipient block: incorrect file key size")
} else if err != nil {
- return nil, ErrIncorrectIdentity
+ // Wrap [ErrIncorrectIdentity] so that multiple passphrases can be tried
+ // in sequence by passing multiple [ScryptIdentity] values to [Decrypt].
+ return nil, fmt.Errorf("%w: incorrect passphrase", ErrIncorrectIdentity)
}
return fileKey, nil
}
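Because an incorrect passphrase now unwraps with a wrapped ErrIncorrectIdentity, several candidate passphrases can be offered to Decrypt in a single call and tried in turn. A sketch, with a hypothetical file path:

```go
package main

import (
	"io"
	"log"
	"os"

	"filippo.io/age"
)

func main() {
	f, err := os.Open("backup.age") // hypothetical passphrase-encrypted file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Each wrong passphrase fails with ErrIncorrectIdentity, so Decrypt moves
	// on to the next identity instead of aborting.
	first, _ := age.NewScryptIdentity("first guess")
	second, _ := age.NewScryptIdentity("second guess")
	r, err := age.Decrypt(f, first, second)
	if err != nil {
		log.Fatal(err)
	}
	io.Copy(os.Stdout, r)
}
```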
diff --git a/vendor/filippo.io/age/x25519.go b/vendor/filippo.io/age/x25519.go
index 6cd87a8d..6c0814db 100644
--- a/vendor/filippo.io/age/x25519.go
+++ b/vendor/filippo.io/age/x25519.go
@@ -21,8 +21,9 @@ import (
const x25519Label = "age-encryption.org/v1/X25519"
-// X25519Recipient is the standard age public key. Messages encrypted to this
-// recipient can be decrypted with the corresponding X25519Identity.
+// X25519Recipient is the standard age pre-quantum public key. Messages
+// encrypted to this recipient can be decrypted with the corresponding
+// [X25519Identity]. For post-quantum resistance, use [HybridRecipient].
//
// This recipient is anonymous, in the sense that an attacker can't tell from
// the message alone if it is encrypted to a certain recipient.
@@ -105,8 +106,9 @@ func (r *X25519Recipient) String() string {
return s
}
-// X25519Identity is the standard age private key, which can decrypt messages
-// encrypted to the corresponding X25519Recipient.
+// X25519Identity is the standard pre-quantum age private key, which can decrypt
+// messages encrypted to the corresponding [X25519Recipient]. For post-quantum
+// resistance, use [HybridIdentity].
type X25519Identity struct {
secretKey, ourPublicKey []byte
}
diff --git a/vendor/filippo.io/hpke/LICENSE b/vendor/filippo.io/hpke/LICENSE
new file mode 100644
index 00000000..2a7cf70d
--- /dev/null
+++ b/vendor/filippo.io/hpke/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/filippo.io/hpke/aead.go b/vendor/filippo.io/hpke/aead.go
new file mode 100644
index 00000000..1a606c68
--- /dev/null
+++ b/vendor/filippo.io/hpke/aead.go
@@ -0,0 +1,130 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpke
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "errors"
+ "fmt"
+
+ "golang.org/x/crypto/chacha20poly1305"
+)
+
+// The AEAD is one of the three components of an HPKE ciphersuite, implementing
+// symmetric encryption.
+type AEAD interface {
+ ID() uint16
+ keySize() int
+ nonceSize() int
+ aead(key []byte) (cipher.AEAD, error)
+}
+
+// NewAEAD returns the AEAD implementation for the given AEAD ID.
+//
+// Applications are encouraged to use specific implementations like [AES128GCM]
+// or [ChaCha20Poly1305] instead, unless runtime agility is required.
+func NewAEAD(id uint16) (AEAD, error) {
+ switch id {
+ case 0x0001: // AES-128-GCM
+ return AES128GCM(), nil
+ case 0x0002: // AES-256-GCM
+ return AES256GCM(), nil
+ case 0x0003: // ChaCha20Poly1305
+ return ChaCha20Poly1305(), nil
+ case 0xFFFF: // Export-only
+ return ExportOnly(), nil
+ default:
+ return nil, fmt.Errorf("unsupported AEAD %04x", id)
+ }
+}
+
+// AES128GCM returns an AES-128-GCM AEAD implementation.
+func AES128GCM() AEAD { return aes128GCM }
+
+// AES256GCM returns an AES-256-GCM AEAD implementation.
+func AES256GCM() AEAD { return aes256GCM }
+
+// ChaCha20Poly1305 returns a ChaCha20Poly1305 AEAD implementation.
+func ChaCha20Poly1305() AEAD { return chacha20poly1305AEAD }
+
+// ExportOnly returns a placeholder AEAD implementation that cannot encrypt or
+// decrypt, but only export secrets with [Sender.Export] or [Recipient.Export].
+//
+// When this is used, [Sender.Seal] and [Recipient.Open] return errors.
+func ExportOnly() AEAD { return exportOnlyAEAD{} }
+
+type aead struct {
+ nK int
+ nN int
+ new func([]byte) (cipher.AEAD, error)
+ id uint16
+}
+
+var aes128GCM = &aead{
+ nK: 128 / 8,
+ nN: 96 / 8,
+ new: newAESGCM,
+ id: 0x0001,
+}
+
+var aes256GCM = &aead{
+ nK: 256 / 8,
+ nN: 96 / 8,
+ new: newAESGCM,
+ id: 0x0002,
+}
+
+var chacha20poly1305AEAD = &aead{
+ nK: chacha20poly1305.KeySize,
+ nN: chacha20poly1305.NonceSize,
+ new: chacha20poly1305.New,
+ id: 0x0003,
+}
+
+func newAESGCM(key []byte) (cipher.AEAD, error) {
+ b, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+ return cipher.NewGCM(b)
+}
+
+func (a *aead) ID() uint16 {
+ return a.id
+}
+
+func (a *aead) aead(key []byte) (cipher.AEAD, error) {
+ if len(key) != a.nK {
+ return nil, errors.New("invalid key size")
+ }
+ return a.new(key)
+}
+
+func (a *aead) keySize() int {
+ return a.nK
+}
+
+func (a *aead) nonceSize() int {
+ return a.nN
+}
+
+type exportOnlyAEAD struct{}
+
+func (exportOnlyAEAD) ID() uint16 {
+ return 0xFFFF
+}
+
+func (exportOnlyAEAD) aead(key []byte) (cipher.AEAD, error) {
+ return nil, nil
+}
+
+func (exportOnlyAEAD) keySize() int {
+ return 0
+}
+
+func (exportOnlyAEAD) nonceSize() int {
+ return 0
+}
diff --git a/vendor/filippo.io/hpke/crypto/ecdh/interfaces.go b/vendor/filippo.io/hpke/crypto/ecdh/interfaces.go
new file mode 100644
index 00000000..606f9fce
--- /dev/null
+++ b/vendor/filippo.io/hpke/crypto/ecdh/interfaces.go
@@ -0,0 +1,15 @@
+// Package ecdh defines an additional interface that will be added to the
+// crypto/ecdh package in Go 1.26+.
+package ecdh
+
+import "crypto/ecdh"
+
+// KeyExchanger is an interface for an opaque private key that can be used for
+// key exchange operations. For example, an ECDH key kept in a hardware module.
+//
+// It is implemented by [ecdh.PrivateKey].
+type KeyExchanger interface {
+ PublicKey() *ecdh.PublicKey
+ Curve() ecdh.Curve
+ ECDH(*ecdh.PublicKey) ([]byte, error)
+}
diff --git a/vendor/filippo.io/hpke/crypto/ecdh/stubs.go b/vendor/filippo.io/hpke/crypto/ecdh/stubs.go
new file mode 100644
index 00000000..7039e002
--- /dev/null
+++ b/vendor/filippo.io/hpke/crypto/ecdh/stubs.go
@@ -0,0 +1,15 @@
+package ecdh
+
+import "crypto/ecdh"
+
+// This file contains stubs to allow importing only this package instead of
+// crypto/ecdh, to minimize the diff.
+
+type Curve = ecdh.Curve
+type PrivateKey = ecdh.PrivateKey
+type PublicKey = ecdh.PublicKey
+
+func X25519() Curve { return ecdh.X25519() }
+func P256() Curve { return ecdh.P256() }
+func P384() Curve { return ecdh.P384() }
+func P521() Curve { return ecdh.P521() }
diff --git a/vendor/filippo.io/hpke/crypto/interfaces.go b/vendor/filippo.io/hpke/crypto/interfaces.go
new file mode 100644
index 00000000..ae312af1
--- /dev/null
+++ b/vendor/filippo.io/hpke/crypto/interfaces.go
@@ -0,0 +1,68 @@
+// Package crypto defines additional interfaces that will be added to the
+// crypto package in Go 1.26+.
+package crypto
+
+import (
+ "crypto/ecdh"
+ "crypto/mlkem"
+)
+
+// KeyExchanger is an interface for an opaque private key that can be used for
+// key exchange operations. For example, an ECDH key kept in a hardware module.
+//
+// It is implemented by [ecdh.PrivateKey].
+type KeyExchanger interface {
+ PublicKey() *ecdh.PublicKey
+ Curve() ecdh.Curve
+ ECDH(*ecdh.PublicKey) ([]byte, error)
+}
+
+// Encapsulator is an interface for a public KEM key that can be used for
+// encapsulation operations.
+//
+// It is implemented, for example, by [crypto/mlkem.EncapsulationKey768].
+type Encapsulator interface {
+ Bytes() []byte
+ Encapsulate() (sharedKey, ciphertext []byte)
+}
+
+// Decapsulator is an interface for an opaque private KEM key that can be used for
+// decapsulation operations. For example, an ML-KEM key kept in a hardware module.
+//
+// It will be implemented by [crypto/mlkem.DecapsulationKey768] in Go 1.26+.
+// In the meantime, use [DecapsulatorFromDecapsulationKey768] and
+// [DecapsulatorFromDecapsulationKey1024].
+type Decapsulator interface {
+ Encapsulator() Encapsulator
+ Decapsulate(ciphertext []byte) (sharedKey []byte, err error)
+}
+
+// DecapsulatorFromDecapsulationKey768 wraps an ML-KEM-768 decapsulation key
+// into a [Decapsulator], until Go 1.26+ where [crypto/mlkem.DecapsulationKey768]
+// implements it natively.
+func DecapsulatorFromDecapsulationKey768(dk *mlkem.DecapsulationKey768) Decapsulator {
+ return &mlkem768Decapsulator{dk}
+}
+
+type mlkem768Decapsulator struct {
+ *mlkem.DecapsulationKey768
+}
+
+func (d *mlkem768Decapsulator) Encapsulator() Encapsulator {
+ return d.EncapsulationKey()
+}
+
+// DecapsulatorFromDecapsulationKey1024 wraps an ML-KEM-1024 decapsulation key
+// into a [Decapsulator], until Go 1.26+ where [crypto/mlkem.DecapsulationKey1024]
+// implements it natively.
+func DecapsulatorFromDecapsulationKey1024(dk *mlkem.DecapsulationKey1024) Decapsulator {
+ return &mlkem1024Decapsulator{dk}
+}
+
+type mlkem1024Decapsulator struct {
+ *mlkem.DecapsulationKey1024
+}
+
+func (d *mlkem1024Decapsulator) Encapsulator() Encapsulator {
+ return d.EncapsulationKey()
+}
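A small sketch of the shim in use, wrapping a standard-library ML-KEM-768 key (Go 1.24+) so it satisfies Decapsulator until Go 1.26 provides the methods natively:

```go
package main

import (
	"bytes"
	"crypto/mlkem"
	"log"

	hpkecrypto "filippo.io/hpke/crypto"
)

func main() {
	dk, err := mlkem.GenerateKey768()
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the standard library key so it can be used wherever a Decapsulator
	// is expected.
	d := hpkecrypto.DecapsulatorFromDecapsulationKey768(dk)

	sharedKey, ct := d.Encapsulator().Encapsulate()
	recovered, err := d.Decapsulate(ct)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("shared keys match:", bytes.Equal(sharedKey, recovered))
}
```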
diff --git a/vendor/filippo.io/hpke/hpke-pq.md b/vendor/filippo.io/hpke/hpke-pq.md
new file mode 100644
index 00000000..eef03701
--- /dev/null
+++ b/vendor/filippo.io/hpke/hpke-pq.md
@@ -0,0 +1,235 @@
+# HPKE Hybrid KEMs
+
+[filippo.io/hpke-pq](https://filippo.io/hpke-pq)
+
+This document is a simplified and self-contained implementation reference for
+the MLKEM768-X25519, MLKEM768-P256, and MLKEM1024-P384 hybrid HPKE KEMs,
+specified in [draft-ietf-hpke-pq-03][], [draft-irtf-cfrg-hybrid-kems-07][],
+[draft-irtf-cfrg-concrete-hybrid-kems-02][], and [draft-ietf-hpke-hpke-02][].
+
+It saves the reader from cross-referencing four documents that use different
+nomenclature (including functions and components with the same name but
+different behavior), irrelevant alternative definitions (the UG, UK, and CK
+frameworks), and multiple KEM abstraction layers.
+
+## Conventions used in this document
+
+`||` denotes concatenation. `[N:M]` denotes the byte slice from index N (inclusive)
+to index M (exclusive). Strings quoted with `""` are encoded as ASCII. Values in
+code blocks are hex encoded byte strings. `random(N)` denotes N bytes of CSPRNG
+output. All lengths are in bytes.
+
+ML-KEM.KeyGen_internal, ML-KEM.Encaps, and ML-KEM.Decaps are defined in
+[FIPS 203][]. `SHAKE256(s, L)` is an invocation of `SHAKE256(s, 8*L)` defined in
+[FIPS 202][]. SHA3-256 is defined in [FIPS 202][].
+
+## KEM definitions
+
+| Parameter | MLKEM768-X25519 | MLKEM768-P256 | MLKEM1024-P384 |
+| ----------------- | ------------------ | ------------------ | ------------------ |
+| ML-KEM parameters | ML-KEM-768 | ML-KEM-768 | ML-KEM-1024 |
+| Group | Curve25519 | P-256 | P-384 |
+| KEM identifier | 0x647a | 0x0050 | 0x0051 |
+| Nsecret | 32 | 32 | 32 |
+| Nenc | 1120 | 1153 | 1665 |
+| Npk | 1216 | 1249 | 1665 |
+| Nsk | 32 | 32 | 32 |
+| Label | `"\.//^\"` | `"MLKEM768-P256"` | `"MLKEM1024-P384"` |
+| KEM.Nct | 1088 | 1088 | 1568 |
+| KEM.Nek | 1184 | 1184 | 1568 |
+| KEM.Nseed | 64 | 64 | 64 |
+| Group.Nelem | 32 | 65 | 97 |
+| Group.Nseed | 32 | 128 | 48 |
+| Group.Nscalar | N/A | 32 | 48 |
+
+The MLKEM768-X25519 Label is alternatively encoded as
+
+ 5c2e2f2f5e5c
+
+## KEM functions
+
+```
+def GenerateKeyPair():
+ seed = random(32)
+
+ ek_PQ, ek_T, _, _ = expandKey(seed)
+ ek = ek_PQ || ek_T
+
+ return (seed, ek)
+
+def DeriveKeyPair(ikm):
+ # SHAKE256.LabeledDerive is part of the single-stage KDF described in
+ # draft-ietf-hpke-hpke-02 and defined in draft-ietf-hpke-pq-03, but is
+ # reproduced below for convenience.
+ seed = SHAKE256.LabeledDerive(ikm, "DeriveKeyPair", "", 32)
+
+ ek_PQ, ek_T, _, _ = expandKey(seed)
+ ek = ek_PQ || ek_T
+
+ return (seed, ek)
+
+def Encaps(ek):
+ ek_PQ = ek[0 : KEM.Nek]
+ ek_T = ek[KEM.Nek : KEM.Nek + Group.Nelem]
+
+ ss_PQ, ct_PQ = ML-KEM.Encaps(ek_PQ)
+
+ sk_E = Group.RandomScalar(random(Group.Nseed))
+ ct_T = Group.Exp(Group.G, sk_E)
+ ss_T = Group.ElementToSharedSecret(Group.Exp(ek_T, sk_E))
+
+ ss = SHA3-256(ss_PQ || ss_T || ct_T || ek_T || Label)
+ ct = ct_PQ || ct_T
+
+ return (ss, ct)
+
+def Decaps(seed, ct):
+ ct_PQ = ct[0 : KEM.Nct]
+ ct_T = ct[KEM.Nct : KEM.Nct + Group.Nelem]
+
+ ek_PQ, ek_T, dk_PQ, dk_T = expandKey(seed)
+
+ ss_PQ = ML-KEM.Decaps(dk_PQ, ct_PQ)
+ ss_T = Group.ElementToSharedSecret(Group.Exp(ct_T, dk_T))
+
+ ss = SHA3-256(ss_PQ || ss_T || ct_T || ek_T || Label)
+
+ return ss
+
+def expandKey(seed):
+ seed_full = SHAKE256(seed, KEM.Nseed + Group.Nseed)
+ seed_PQ = seed_full[0 : KEM.Nseed]
+ seed_T = seed_full[KEM.Nseed : KEM.Nseed + Group.Nseed]
+
+ # Note that even if expandKey returns the semi-expanded ML-KEM decapsulation
+ # key dk_PQ to use FIPS 203 definitions, that format should be avoided and
+ # instead seed_PQ should be expanded directly into the implementation's
+ # internal ML-KEM private representation.
+ (ek_PQ, dk_PQ) = ML-KEM.KeyGen_internal(seed_PQ)
+ dk_T = Group.RandomScalar(seed_T)
+ ek_T = Group.Exp(Group.G, dk_T)
+
+ return (ek_PQ, ek_T, dk_PQ, dk_T)
+```
+
+There is no distinction between a private/public key and its serialization:
+there is no abstract key format, only byte strings. In practice, implementations
+will probably want to load keys into pairs of internal representations, and
+serialize them back to their byte string format when needed.
+
+The IETF/IRTF documents lack a specified way to turn a private key into a public
+key, although it can be inferred from the key generation process. We define such
+a process here as `PrivateKeyToPublicKey`.
+
+```
+def PrivateKeyToPublicKey(seed):
+ ek_PQ, ek_T, _, _ = expandKey(seed)
+ ek = ek_PQ || ek_T
+
+ return ek
+```
+
+## Group definitions
+
+### Curve25519
+
+Group.Exp is the X25519 function defined in [RFC 7748][].
+
+Group.G is the canonical generator, which encodes to
+
+ 0900000000000000000000000000000000000000000000000000000000000000
+
+consistently with [RFC 7748, Section 4.1][] and [RFC 7748, Section 6.1][].
+
+Group.RandomScalar and Group.ElementToSharedSecret are the identity.
+
+### P-256 and P-384
+
+The NIST P-256 and P-384 elliptic curves are defined in [SP800-186][].
+
+`Group.Exp(p, x)` computes scalar multiplication between the input element p and
+the scalar x. The input element p and the output element have length Group.Nelem
+and are encoded in uncompressed representation using the
+Elliptic-Curve-Point-to-Octet-String and Octet-String-to-Elliptic-Curve-Point
+functions defined in [SEC 1, Version 2.0][]. The input scalar x has length
+Group.Nscalar and is encoded in big-endian representation using the I2OSP and
+OS2IP functions defined in [RFC 8017][].
+
+Group.G is the canonical generator, which encodes to
+
+ 046b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c29
+ 64fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
+
+for P-256, and to
+
+ 04aa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab
+ 73617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f
+
+for P-384, consistently with [SP800-186][], Section 3.2.1.
+
+```
+def RandomScalar(seed):
+ start = 0
+ end = Nscalar
+ sk = seed[start : end]
+ while OS2IP(sk) == 0 || OS2IP(sk) >= OS2IP(Group.N):
+ start = end
+ end = end + Nscalar
+ if end > len(seed):
+ # This happens with cryptographically negligible probability.
+ # The chance of a single rejection is < 2^-32 for P-256 and
+ # < 2^-192 for P-384. The chance of reaching this is thus
+ # < 2^-128 for P-256 and < 2^-192 for P-384.
+ raise Exception("Rejection sampling failed")
+ sk = seed[start : end]
+ return sk
+```
+
+Group.N is the order of the curve's group, which encodes to
+
+ ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
+
+for P-256, and to
+
+ ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973
+
+for P-384, consistently with [SP800-186][], Section 3.2.1.
+
+Group.ElementToSharedSecret encodes the input element as an X coordinate using
+the Field-Element-to-Octet-String function in [SEC 1, Version 2.0][].
+
+> Note that since the scalar x is always derived uniformly at random, the chance
+> of it being zero is cryptographically negligible. Moreover,
+> Octet-String-to-Elliptic-Curve-Point never decodes the point at infinity from
+> a string of Group.Nelem bytes. Since NIST P curves have prime order, this
+> means that the output of Group.Exp and input to Group.ElementToSharedSecret is
+> also never the point at infinity.
+
+## SHAKE256.LabeledDerive
+
+SHAKE256.LabeledDerive is used by DeriveKeyPair, and is part of the single-stage
+KDF specified across [draft-ietf-hpke-hpke-02][] and [draft-ietf-hpke-pq-03][],
+but is reproduced below for convenience.
+
+```
+def SHAKE256.LabeledDerive(ikm, label, context, L):
+ suite_id = concat("KEM", I2OSP(kem_id, 2))
+ prefixed_label = I2OSP(len(label), 2) || label
+ labeled_ikm = ikm || "HPKE-v1" || suite_id || prefixed_label || I2OSP(L, 2) || context
+ return SHAKE256(labeled_ikm, L)
+```
+
+I2OSP is defined in [RFC 8017][], and `kem_id` is the KEM identifier.
+
+[draft-ietf-hpke-hpke-02]: https://datatracker.ietf.org/doc/html/draft-ietf-hpke-hpke-02
+[draft-ietf-hpke-pq-03]: https://datatracker.ietf.org/doc/html/draft-ietf-hpke-pq-03
+[draft-irtf-cfrg-hybrid-kems-07]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hybrid-kems-07
+[draft-irtf-cfrg-concrete-hybrid-kems-02]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-concrete-hybrid-kems-02
+[RFC 7748]: https://rfc-editor.org/rfc/rfc7748.html
+[RFC 7748, Section 4.1]: https://rfc-editor.org/rfc/rfc7748.html#section-4.1
+[RFC 7748, Section 6.1]: https://rfc-editor.org/rfc/rfc7748.html#section-6.1
+[RFC 8017]: https://datatracker.ietf.org/doc/html/rfc8017
+[FIPS 202]: https://doi.org/10.6028/NIST.FIPS.202
+[FIPS 203]: https://doi.org/10.6028/NIST.FIPS.203
+[SP800-186]: https://doi.org/10.6028/NIST.SP.800-186
+[SEC 1, Version 2.0]: https://www.secg.org/sec1-v2.pdf
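For readers following the KEM functions above in Go, this is a minimal sketch of the MLKEM768-X25519 shared-secret combiner, assuming ss_PQ, ss_T, ct_T, and ek_T have already been produced by the ML-KEM-768 and X25519 steps:

```go
package hybridkem

import "crypto/sha3" // Go 1.24+; golang.org/x/crypto/sha3 works the same way

// combine implements ss = SHA3-256(ss_PQ || ss_T || ct_T || ek_T || Label)
// for MLKEM768-X25519, whose Label is the six bytes 5c2e2f2f5e5c ("\.//^\").
func combine(ssPQ, ssT, ctT, ekT []byte) [32]byte {
	label := []byte("\\.//^\\")
	in := make([]byte, 0, len(ssPQ)+len(ssT)+len(ctT)+len(ekT)+len(label))
	in = append(in, ssPQ...)
	in = append(in, ssT...)
	in = append(in, ctT...)
	in = append(in, ekT...)
	in = append(in, label...)
	return sha3.Sum256(in)
}
```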
diff --git a/vendor/filippo.io/hpke/hpke.go b/vendor/filippo.io/hpke/hpke.go
new file mode 100644
index 00000000..a5f75991
--- /dev/null
+++ b/vendor/filippo.io/hpke/hpke.go
@@ -0,0 +1,263 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpke implements Hybrid Public Key Encryption (HPKE) as defined in
+// [RFC 9180].
+//
+// [RFC 9180]: https://www.rfc-editor.org/rfc/rfc9180.html
+package hpke
+
+import (
+ "crypto/cipher"
+ "errors"
+
+ "filippo.io/hpke/internal/byteorder"
+)
+
+type context struct {
+ suiteID []byte
+
+ export func(string, uint16) ([]byte, error)
+
+ aead cipher.AEAD
+ baseNonce []byte
+ // seqNum starts at zero and is incremented for each Seal/Open call.
+ // 64 bits are enough not to overflow for 500 years at 1ns per operation.
+ seqNum uint64
+}
+
+// Sender is a sending HPKE context. It is instantiated with a specific KEM
+// encapsulation key (i.e. the public key), and it is stateful, incrementing the
+// nonce counter for each [Sender.Seal] call.
+type Sender struct {
+ *context
+}
+
+// Recipient is a receiving HPKE context. It is instantiated with a specific KEM
+// decapsulation key (i.e. the secret key), and it is stateful, incrementing the
+// nonce counter for each successful [Recipient.Open] call.
+type Recipient struct {
+ *context
+}
+
+func newContext(sharedSecret []byte, kemID uint16, kdf KDF, aead AEAD, info []byte) (*context, error) {
+ sid := suiteID(kemID, kdf.ID(), aead.ID())
+
+ if kdf.oneStage() {
+ secrets := make([]byte, 0, 2+2+len(sharedSecret))
+ secrets = byteorder.BEAppendUint16(secrets, 0) // empty psk
+ secrets = byteorder.BEAppendUint16(secrets, uint16(len(sharedSecret)))
+ secrets = append(secrets, sharedSecret...)
+
+ ksContext := make([]byte, 0, 1+2+2+len(info))
+ ksContext = append(ksContext, 0) // mode 0
+ ksContext = byteorder.BEAppendUint16(ksContext, 0) // empty psk_id
+ ksContext = byteorder.BEAppendUint16(ksContext, uint16(len(info)))
+ ksContext = append(ksContext, info...)
+
+ secret, err := kdf.labeledDerive(sid, secrets, "secret", ksContext,
+ uint16(aead.keySize()+aead.nonceSize()+kdf.size()))
+ if err != nil {
+ return nil, err
+ }
+ key := secret[:aead.keySize()]
+ baseNonce := secret[aead.keySize() : aead.keySize()+aead.nonceSize()]
+ expSecret := secret[aead.keySize()+aead.nonceSize():]
+
+ a, err := aead.aead(key)
+ if err != nil {
+ return nil, err
+ }
+ export := func(exporterContext string, length uint16) ([]byte, error) {
+ return kdf.labeledDerive(sid, expSecret, "sec", []byte(exporterContext), length)
+ }
+
+ return &context{
+ aead: a,
+ suiteID: sid,
+ export: export,
+ baseNonce: baseNonce,
+ }, nil
+ }
+
+ pskIDHash, err := kdf.labeledExtract(sid, nil, "psk_id_hash", nil)
+ if err != nil {
+ return nil, err
+ }
+ infoHash, err := kdf.labeledExtract(sid, nil, "info_hash", info)
+ if err != nil {
+ return nil, err
+ }
+ ksContext := append([]byte{0}, pskIDHash...)
+ ksContext = append(ksContext, infoHash...)
+
+ secret, err := kdf.labeledExtract(sid, sharedSecret, "secret", nil)
+ if err != nil {
+ return nil, err
+ }
+ key, err := kdf.labeledExpand(sid, secret, "key", ksContext, uint16(aead.keySize()))
+ if err != nil {
+ return nil, err
+ }
+ a, err := aead.aead(key)
+ if err != nil {
+ return nil, err
+ }
+ baseNonce, err := kdf.labeledExpand(sid, secret, "base_nonce", ksContext, uint16(aead.nonceSize()))
+ if err != nil {
+ return nil, err
+ }
+ expSecret, err := kdf.labeledExpand(sid, secret, "exp", ksContext, uint16(kdf.size()))
+ if err != nil {
+ return nil, err
+ }
+ export := func(exporterContext string, length uint16) ([]byte, error) {
+ return kdf.labeledExpand(sid, expSecret, "sec", []byte(exporterContext), length)
+ }
+
+ return &context{
+ aead: a,
+ suiteID: sid,
+ export: export,
+ baseNonce: baseNonce,
+ }, nil
+}
+
+// NewSender returns a sending HPKE context for the provided KEM encapsulation
+// key (i.e. the public key), and using the ciphersuite defined by the
+// combination of KEM, KDF, and AEAD.
+//
+// The info parameter is additional public information that must match between
+// sender and recipient.
+//
+// The returned enc ciphertext can be used to instantiate a matching receiving
+// HPKE context with the corresponding KEM decapsulation key.
+func NewSender(pk PublicKey, kdf KDF, aead AEAD, info []byte) (enc []byte, s *Sender, err error) {
+ sharedSecret, encapsulatedKey, err := pk.encap()
+ if err != nil {
+ return nil, nil, err
+ }
+ context, err := newContext(sharedSecret, pk.KEM().ID(), kdf, aead, info)
+ if err != nil {
+ return nil, nil, err
+ }
+ return encapsulatedKey, &Sender{context}, nil
+}
+
+// NewRecipient returns a receiving HPKE context for the provided KEM
+// decapsulation key (i.e. the secret key), and using the ciphersuite defined by
+// the combination of KEM, KDF, and AEAD.
+//
+// The enc parameter must have been produced by a matching sending HPKE context
+// with the corresponding KEM encapsulation key. The info parameter is
+// additional public information that must match between sender and recipient.
+func NewRecipient(enc []byte, k PrivateKey, kdf KDF, aead AEAD, info []byte) (*Recipient, error) {
+ sharedSecret, err := k.decap(enc)
+ if err != nil {
+ return nil, err
+ }
+ context, err := newContext(sharedSecret, k.KEM().ID(), kdf, aead, info)
+ if err != nil {
+ return nil, err
+ }
+ return &Recipient{context}, nil
+}
+
+// Seal encrypts the provided plaintext, optionally binding to the additional
+// public data aad.
+//
+// Seal uses incrementing counters for each call, and Open on the receiving side
+// must be called in the same order as Seal.
+func (s *Sender) Seal(aad, plaintext []byte) ([]byte, error) {
+ if s.aead == nil {
+ return nil, errors.New("export-only instantiation")
+ }
+ ciphertext := s.aead.Seal(nil, s.nextNonce(), plaintext, aad)
+ s.seqNum++
+ return ciphertext, nil
+}
+
+// Seal instantiates a single-use sending HPKE context like [NewSender],
+// and then encrypts the provided plaintext like [Sender.Seal] (with no aad).
+// Seal returns the concatenation of the encapsulated key and the ciphertext.
+func Seal(pk PublicKey, kdf KDF, aead AEAD, info, plaintext []byte) ([]byte, error) {
+ enc, s, err := NewSender(pk, kdf, aead, info)
+ if err != nil {
+ return nil, err
+ }
+ ct, err := s.Seal(nil, plaintext)
+ if err != nil {
+ return nil, err
+ }
+ return append(enc, ct...), nil
+}
+
+// Export produces a secret value derived from the shared key between sender and
+// recipient. length must be at most 65,535.
+func (s *Sender) Export(exporterContext string, length int) ([]byte, error) {
+ if length < 0 || length > 0xFFFF {
+ return nil, errors.New("invalid length")
+ }
+ return s.export(exporterContext, uint16(length))
+}
+
+// Open decrypts the provided ciphertext, optionally binding to the additional
+// public data aad, or returns an error if decryption fails.
+//
+// Open uses incrementing counters for each successful call, and must be called
+// in the same order as Seal on the sending side.
+func (r *Recipient) Open(aad, ciphertext []byte) ([]byte, error) {
+ if r.aead == nil {
+ return nil, errors.New("export-only instantiation")
+ }
+ plaintext, err := r.aead.Open(nil, r.nextNonce(), ciphertext, aad)
+ if err != nil {
+ return nil, err
+ }
+ r.seqNum++
+ return plaintext, nil
+}
+
+// Open instantiates a single-use receiving HPKE context like [NewRecipient],
+// and then decrypts the provided ciphertext like [Recipient.Open] (with no aad).
+// ciphertext must be the concatenation of the encapsulated key and the actual ciphertext.
+func Open(k PrivateKey, kdf KDF, aead AEAD, info, ciphertext []byte) ([]byte, error) {
+ encSize := k.KEM().encSize()
+ if len(ciphertext) < encSize {
+ return nil, errors.New("ciphertext too short")
+ }
+ enc, ciphertext := ciphertext[:encSize], ciphertext[encSize:]
+ r, err := NewRecipient(enc, k, kdf, aead, info)
+ if err != nil {
+ return nil, err
+ }
+ return r.Open(nil, ciphertext)
+}
+
+// Export produces a secret value derived from the shared key between sender and
+// recipient. length must be at most 65,535.
+func (r *Recipient) Export(exporterContext string, length int) ([]byte, error) {
+ if length < 0 || length > 0xFFFF {
+ return nil, errors.New("invalid length")
+ }
+ return r.export(exporterContext, uint16(length))
+}
+
+func (ctx *context) nextNonce() []byte {
+ nonce := make([]byte, ctx.aead.NonceSize())
+ byteorder.BEPutUint64(nonce[len(nonce)-8:], ctx.seqNum)
+ for i := range ctx.baseNonce {
+ nonce[i] ^= ctx.baseNonce[i]
+ }
+ return nonce
+}
+
+func suiteID(kemID, kdfID, aeadID uint16) []byte {
+ suiteID := make([]byte, 0, 4+2+2+2)
+ suiteID = append(suiteID, []byte("HPKE")...)
+ suiteID = byteorder.BEAppendUint16(suiteID, kemID)
+ suiteID = byteorder.BEAppendUint16(suiteID, kdfID)
+ suiteID = byteorder.BEAppendUint16(suiteID, aeadID)
+ return suiteID
+}
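A one-shot usage sketch of the package, mirroring the ciphersuite the age HybridRecipient selects above (MLKEM768-X25519 with HKDF-SHA256 and ChaCha20-Poly1305):

```go
package main

import (
	"log"

	"filippo.io/hpke"
)

func main() {
	// Generate a hybrid key pair and do a one-shot encryption.
	priv, err := hpke.MLKEM768X25519().GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	info := []byte("example application context")

	// Seal prepends the encapsulated key to the ciphertext; Open expects the
	// same concatenated form.
	sealed, err := hpke.Seal(priv.PublicKey(), hpke.HKDFSHA256(), hpke.ChaCha20Poly1305(), info, []byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	plaintext, err := hpke.Open(priv, hpke.HKDFSHA256(), hpke.ChaCha20Poly1305(), info, sealed)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", plaintext)
}
```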
diff --git a/vendor/filippo.io/hpke/internal/byteorder/byteorder.go b/vendor/filippo.io/hpke/internal/byteorder/byteorder.go
new file mode 100644
index 00000000..01500a87
--- /dev/null
+++ b/vendor/filippo.io/hpke/internal/byteorder/byteorder.go
@@ -0,0 +1,149 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package byteorder provides functions for decoding and encoding
+// little and big endian integer types from/to byte slices.
+package byteorder
+
+func LEUint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func LEPutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+}
+
+func LEAppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ )
+}
+
+func LEUint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func LEPutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+}
+
+func LEAppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ )
+}
+
+func LEUint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+func LEPutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+ b[4] = byte(v >> 32)
+ b[5] = byte(v >> 40)
+ b[6] = byte(v >> 48)
+ b[7] = byte(v >> 56)
+}
+
+func LEAppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56),
+ )
+}
+
+func BEUint16(b []byte) uint16 {
+ _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint16(b[1]) | uint16(b[0])<<8
+}
+
+func BEPutUint16(b []byte, v uint16) {
+ _ = b[1] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 8)
+ b[1] = byte(v)
+}
+
+func BEAppendUint16(b []byte, v uint16) []byte {
+ return append(b,
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func BEUint32(b []byte) uint32 {
+ _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+}
+
+func BEPutUint32(b []byte, v uint32) {
+ _ = b[3] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 24)
+ b[1] = byte(v >> 16)
+ b[2] = byte(v >> 8)
+ b[3] = byte(v)
+}
+
+func BEAppendUint32(b []byte, v uint32) []byte {
+ return append(b,
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
+
+func BEUint64(b []byte) uint64 {
+ _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
+ return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
+ uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
+}
+
+func BEPutUint64(b []byte, v uint64) {
+ _ = b[7] // early bounds check to guarantee safety of writes below
+ b[0] = byte(v >> 56)
+ b[1] = byte(v >> 48)
+ b[2] = byte(v >> 40)
+ b[3] = byte(v >> 32)
+ b[4] = byte(v >> 24)
+ b[5] = byte(v >> 16)
+ b[6] = byte(v >> 8)
+ b[7] = byte(v)
+}
+
+func BEAppendUint64(b []byte, v uint64) []byte {
+ return append(b,
+ byte(v>>56),
+ byte(v>>48),
+ byte(v>>40),
+ byte(v>>32),
+ byte(v>>24),
+ byte(v>>16),
+ byte(v>>8),
+ byte(v),
+ )
+}
diff --git a/vendor/filippo.io/hpke/kdf.go b/vendor/filippo.io/hpke/kdf.go
new file mode 100644
index 00000000..0563c625
--- /dev/null
+++ b/vendor/filippo.io/hpke/kdf.go
@@ -0,0 +1,156 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpke
+
+import (
+ "crypto/hkdf"
+ "crypto/sha256"
+ "crypto/sha3"
+ "crypto/sha512"
+ "errors"
+ "fmt"
+ "hash"
+
+ "filippo.io/hpke/internal/byteorder"
+)
+
+// The KDF is one of the three components of an HPKE ciphersuite, implementing
+// key derivation.
+type KDF interface {
+ ID() uint16
+ oneStage() bool
+ size() int // Nh
+ labeledDerive(suiteID, inputKey []byte, label string, context []byte, length uint16) ([]byte, error)
+ labeledExtract(suiteID, salt []byte, label string, inputKey []byte) ([]byte, error)
+ labeledExpand(suiteID, randomKey []byte, label string, info []byte, length uint16) ([]byte, error)
+}
+
+// NewKDF returns the KDF implementation for the given KDF ID.
+//
+// Applications are encouraged to use specific implementations like [HKDFSHA256]
+// instead, unless runtime agility is required.
+func NewKDF(id uint16) (KDF, error) {
+ switch id {
+ case 0x0001: // HKDF-SHA256
+ return HKDFSHA256(), nil
+ case 0x0002: // HKDF-SHA384
+ return HKDFSHA384(), nil
+ case 0x0003: // HKDF-SHA512
+ return HKDFSHA512(), nil
+ case 0x0010: // SHAKE128
+ return SHAKE128(), nil
+ case 0x0011: // SHAKE256
+ return SHAKE256(), nil
+ default:
+ return nil, fmt.Errorf("unsupported KDF %04x", id)
+ }
+}
+
+// HKDFSHA256 returns an HKDF-SHA256 KDF implementation.
+func HKDFSHA256() KDF { return hkdfSHA256 }
+
+// HKDFSHA384 returns an HKDF-SHA384 KDF implementation.
+func HKDFSHA384() KDF { return hkdfSHA384 }
+
+// HKDFSHA512 returns an HKDF-SHA512 KDF implementation.
+func HKDFSHA512() KDF { return hkdfSHA512 }
+
+type hkdfKDF struct {
+ hash func() hash.Hash
+ id uint16
+ nH int
+}
+
+var hkdfSHA256 = &hkdfKDF{hash: sha256.New, id: 0x0001, nH: sha256.Size}
+var hkdfSHA384 = &hkdfKDF{hash: sha512.New384, id: 0x0002, nH: sha512.Size384}
+var hkdfSHA512 = &hkdfKDF{hash: sha512.New, id: 0x0003, nH: sha512.Size}
+
+func (kdf *hkdfKDF) ID() uint16 {
+ return kdf.id
+}
+
+func (kdf *hkdfKDF) size() int {
+ return kdf.nH
+}
+
+func (kdf *hkdfKDF) oneStage() bool {
+ return false
+}
+
+func (kdf *hkdfKDF) labeledDerive(_, _ []byte, _ string, _ []byte, _ uint16) ([]byte, error) {
+ return nil, errors.New("hpke: internal error: labeledDerive called on two-stage KDF")
+}
+
+func (kdf *hkdfKDF) labeledExtract(suiteID []byte, salt []byte, label string, inputKey []byte) ([]byte, error) {
+ labeledIKM := make([]byte, 0, 7+len(suiteID)+len(label)+len(inputKey))
+ labeledIKM = append(labeledIKM, []byte("HPKE-v1")...)
+ labeledIKM = append(labeledIKM, suiteID...)
+ labeledIKM = append(labeledIKM, label...)
+ labeledIKM = append(labeledIKM, inputKey...)
+ return hkdf.Extract(kdf.hash, labeledIKM, salt)
+}
+
+func (kdf *hkdfKDF) labeledExpand(suiteID []byte, randomKey []byte, label string, info []byte, length uint16) ([]byte, error) {
+ labeledInfo := make([]byte, 0, 2+7+len(suiteID)+len(label)+len(info))
+ labeledInfo = byteorder.BEAppendUint16(labeledInfo, length)
+ labeledInfo = append(labeledInfo, []byte("HPKE-v1")...)
+ labeledInfo = append(labeledInfo, suiteID...)
+ labeledInfo = append(labeledInfo, label...)
+ labeledInfo = append(labeledInfo, info...)
+ return hkdf.Expand(kdf.hash, randomKey, string(labeledInfo), int(length))
+}
+
+// SHAKE128 returns a SHAKE128 KDF implementation.
+func SHAKE128() KDF {
+ return shake128KDF
+}
+
+// SHAKE256 returns a SHAKE256 KDF implementation.
+func SHAKE256() KDF {
+ return shake256KDF
+}
+
+type shakeKDF struct {
+ hash func() *sha3.SHAKE
+ id uint16
+ nH int
+}
+
+var shake128KDF = &shakeKDF{hash: sha3.NewSHAKE128, id: 0x0010, nH: 32}
+var shake256KDF = &shakeKDF{hash: sha3.NewSHAKE256, id: 0x0011, nH: 64}
+
+func (kdf *shakeKDF) ID() uint16 {
+ return kdf.id
+}
+
+func (kdf *shakeKDF) size() int {
+ return kdf.nH
+}
+
+func (kdf *shakeKDF) oneStage() bool {
+ return true
+}
+
+func (kdf *shakeKDF) labeledDerive(suiteID, inputKey []byte, label string, context []byte, length uint16) ([]byte, error) {
+ H := kdf.hash()
+ H.Write(inputKey)
+ H.Write([]byte("HPKE-v1"))
+ H.Write(suiteID)
+ H.Write([]byte{byte(len(label) >> 8), byte(len(label))})
+ H.Write([]byte(label))
+ H.Write([]byte{byte(length >> 8), byte(length)})
+ H.Write(context)
+ out := make([]byte, length)
+ H.Read(out)
+ return out, nil
+}
+
+func (kdf *shakeKDF) labeledExtract(_, _ []byte, _ string, _ []byte) ([]byte, error) {
+ return nil, errors.New("hpke: internal error: labeledExtract called on one-stage KDF")
+}
+
+func (kdf *shakeKDF) labeledExpand(_, _ []byte, _ string, _ []byte, _ uint16) ([]byte, error) {
+ return nil, errors.New("hpke: internal error: labeledExpand called on one-stage KDF")
+}
diff --git a/vendor/filippo.io/hpke/kem.go b/vendor/filippo.io/hpke/kem.go
new file mode 100644
index 00000000..5acd6a79
--- /dev/null
+++ b/vendor/filippo.io/hpke/kem.go
@@ -0,0 +1,383 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpke
+
+import (
+ "crypto/rand"
+ "errors"
+
+ "filippo.io/hpke/crypto/ecdh"
+ "filippo.io/hpke/internal/byteorder"
+)
+
+// A KEM is a Key Encapsulation Mechanism, one of the three components of an
+// HPKE ciphersuite.
+type KEM interface {
+ // ID returns the HPKE KEM identifier.
+ ID() uint16
+
+ // GenerateKey generates a new key pair.
+ GenerateKey() (PrivateKey, error)
+
+ // NewPublicKey deserializes a public key from bytes.
+ //
+ // It implements DeserializePublicKey, as defined in RFC 9180.
+ NewPublicKey([]byte) (PublicKey, error)
+
+ // NewPrivateKey deserializes a private key from bytes.
+ //
+ // It implements DeserializePrivateKey, as defined in RFC 9180.
+ NewPrivateKey([]byte) (PrivateKey, error)
+
+ // DeriveKeyPair derives a key pair from the given input keying material.
+ //
+ // It implements DeriveKeyPair, as defined in RFC 9180.
+ DeriveKeyPair(ikm []byte) (PrivateKey, error)
+
+ encSize() int
+}
+
+// NewKEM returns the KEM implementation for the given KEM ID.
+//
+// Applications are encouraged to use specific implementations like [DHKEM] or
+// [MLKEM768X25519] instead, unless runtime agility is required.
+func NewKEM(id uint16) (KEM, error) {
+ switch id {
+ case 0x0010: // DHKEM(P-256, HKDF-SHA256)
+ return DHKEM(ecdh.P256()), nil
+ case 0x0011: // DHKEM(P-384, HKDF-SHA384)
+ return DHKEM(ecdh.P384()), nil
+ case 0x0012: // DHKEM(P-521, HKDF-SHA512)
+ return DHKEM(ecdh.P521()), nil
+ case 0x0020: // DHKEM(X25519, HKDF-SHA256)
+ return DHKEM(ecdh.X25519()), nil
+ case 0x0041: // ML-KEM-768
+ return MLKEM768(), nil
+ case 0x0042: // ML-KEM-1024
+ return MLKEM1024(), nil
+ case 0x647a: // MLKEM768-X25519
+ return MLKEM768X25519(), nil
+ case 0x0050: // MLKEM768-P256
+ return MLKEM768P256(), nil
+ case 0x0051: // MLKEM1024-P384
+ return MLKEM1024P384(), nil
+ default:
+ return nil, errors.New("unsupported KEM")
+ }
+}
+
+// A PublicKey is an instantiation of a KEM (one of the three components of an
+// HPKE ciphersuite) with an encapsulation key (i.e. the public key).
+//
+// A PublicKey is usually obtained from a method of the corresponding [KEM] or
+// [PrivateKey], such as [KEM.NewPublicKey] or [PrivateKey.PublicKey].
+type PublicKey interface {
+ // KEM returns the instantiated KEM.
+ KEM() KEM
+
+ // Bytes returns the public key as the output of SerializePublicKey.
+ Bytes() []byte
+
+ encap() (sharedSecret, enc []byte, err error)
+}
+
+// A PrivateKey is an instantiation of a KEM (one of the three components of
+// an HPKE ciphersuite) with a decapsulation key (i.e. the secret key).
+//
+// A PrivateKey is usually obtained from a method of the corresponding [KEM],
+// such as [KEM.GenerateKey] or [KEM.NewPrivateKey].
+type PrivateKey interface {
+ // KEM returns the instantiated KEM.
+ KEM() KEM
+
+ // Bytes returns the private key as the output of SerializePrivateKey, as
+ // defined in RFC 9180.
+ //
+ // Note that for X25519 this might not match the input to NewPrivateKey.
+ // This is a requirement of RFC 9180, Section 7.1.2.
+ Bytes() ([]byte, error)
+
+ // PublicKey returns the corresponding PublicKey.
+ PublicKey() PublicKey
+
+ decap(enc []byte) (sharedSecret []byte, err error)
+}
+
+type dhKEM struct {
+ kdf KDF
+ id uint16
+ curve ecdh.Curve
+ Nsecret uint16
+ Nsk uint16
+ Nenc int
+}
+
+func (kem *dhKEM) extractAndExpand(dhKey, kemContext []byte) ([]byte, error) {
+ suiteID := byteorder.BEAppendUint16([]byte("KEM"), kem.id)
+ eaePRK, err := kem.kdf.labeledExtract(suiteID, nil, "eae_prk", dhKey)
+ if err != nil {
+ return nil, err
+ }
+ return kem.kdf.labeledExpand(suiteID, eaePRK, "shared_secret", kemContext, kem.Nsecret)
+}
+
+func (kem *dhKEM) ID() uint16 {
+ return kem.id
+}
+
+func (kem *dhKEM) encSize() int {
+ return kem.Nenc
+}
+
+var dhKEMP256 = &dhKEM{HKDFSHA256(), 0x0010, ecdh.P256(), 32, 32, 65}
+var dhKEMP384 = &dhKEM{HKDFSHA384(), 0x0011, ecdh.P384(), 48, 48, 97}
+var dhKEMP521 = &dhKEM{HKDFSHA512(), 0x0012, ecdh.P521(), 64, 66, 133}
+var dhKEMX25519 = &dhKEM{HKDFSHA256(), 0x0020, ecdh.X25519(), 32, 32, 32}
+
+// DHKEM returns a KEM implementing one of
+//
+// - DHKEM(P-256, HKDF-SHA256)
+// - DHKEM(P-384, HKDF-SHA384)
+// - DHKEM(P-521, HKDF-SHA512)
+// - DHKEM(X25519, HKDF-SHA256)
+//
+// depending on curve.
+func DHKEM(curve ecdh.Curve) KEM {
+ switch curve {
+ case ecdh.P256():
+ return dhKEMP256
+ case ecdh.P384():
+ return dhKEMP384
+ case ecdh.P521():
+ return dhKEMP521
+ case ecdh.X25519():
+ return dhKEMX25519
+ default:
+ // The set of ecdh.Curve implementations is closed, because the
+ // interface has unexported methods. Therefore, this default case is
+ // only hit if a new curve is added that DHKEM doesn't support.
+ return unsupportedCurveKEM{}
+ }
+}
+
+type unsupportedCurveKEM struct{}
+
+func (unsupportedCurveKEM) ID() uint16 {
+ return 0
+}
+func (unsupportedCurveKEM) GenerateKey() (PrivateKey, error) {
+ return nil, errors.New("unsupported curve")
+}
+func (unsupportedCurveKEM) NewPublicKey([]byte) (PublicKey, error) {
+ return nil, errors.New("unsupported curve")
+}
+func (unsupportedCurveKEM) NewPrivateKey([]byte) (PrivateKey, error) {
+ return nil, errors.New("unsupported curve")
+}
+func (unsupportedCurveKEM) DeriveKeyPair([]byte) (PrivateKey, error) {
+ return nil, errors.New("unsupported curve")
+}
+func (unsupportedCurveKEM) encSize() int {
+ return 0
+}
+
+type dhKEMPublicKey struct {
+ kem *dhKEM
+ pub *ecdh.PublicKey
+}
+
+// NewDHKEMPublicKey returns a PublicKey implementing
+//
+// - DHKEM(P-256, HKDF-SHA256)
+// - DHKEM(P-384, HKDF-SHA384)
+// - DHKEM(P-521, HKDF-SHA512)
+// - DHKEM(X25519, HKDF-SHA256)
+//
+// depending on the underlying curve of pub ([ecdh.X25519], [ecdh.P256],
+// [ecdh.P384], or [ecdh.P521]).
+//
+// This function is meant for applications that already have an instantiated
+// crypto/ecdh public key. Otherwise, applications should use the
+// [KEM.NewPublicKey] method of [DHKEM].
+func NewDHKEMPublicKey(pub *ecdh.PublicKey) (PublicKey, error) {
+ kem, ok := DHKEM(pub.Curve()).(*dhKEM)
+ if !ok {
+ return nil, errors.New("unsupported curve")
+ }
+ return &dhKEMPublicKey{
+ kem: kem,
+ pub: pub,
+ }, nil
+}
+
+func (kem *dhKEM) NewPublicKey(data []byte) (PublicKey, error) {
+ pub, err := kem.curve.NewPublicKey(data)
+ if err != nil {
+ return nil, err
+ }
+ return NewDHKEMPublicKey(pub)
+}
+
+func (pk *dhKEMPublicKey) KEM() KEM {
+ return pk.kem
+}
+
+func (pk *dhKEMPublicKey) Bytes() []byte {
+ return pk.pub.Bytes()
+}
+
+// testingOnlyGenerateKey is only used during testing, to provide
+// a fixed test key to use when checking the RFC 9180 vectors.
+var testingOnlyGenerateKey func() *ecdh.PrivateKey
+
+func (pk *dhKEMPublicKey) encap() (sharedSecret []byte, encapPub []byte, err error) {
+ privEph, err := pk.pub.Curve().GenerateKey(rand.Reader)
+ if err != nil {
+ return nil, nil, err
+ }
+ if testingOnlyGenerateKey != nil {
+ privEph = testingOnlyGenerateKey()
+ }
+ dhVal, err := privEph.ECDH(pk.pub)
+ if err != nil {
+ return nil, nil, err
+ }
+ encPubEph := privEph.PublicKey().Bytes()
+
+ encPubRecip := pk.pub.Bytes()
+ kemContext := append(encPubEph, encPubRecip...)
+ sharedSecret, err = pk.kem.extractAndExpand(dhVal, kemContext)
+ if err != nil {
+ return nil, nil, err
+ }
+ return sharedSecret, encPubEph, nil
+}
+
+type dhKEMPrivateKey struct {
+ kem *dhKEM
+ priv ecdh.KeyExchanger
+}
+
+// NewDHKEMPrivateKey returns a PrivateKey implementing
+//
+// - DHKEM(P-256, HKDF-SHA256)
+// - DHKEM(P-384, HKDF-SHA384)
+// - DHKEM(P-521, HKDF-SHA512)
+// - DHKEM(X25519, HKDF-SHA256)
+//
+// depending on the underlying curve of priv ([ecdh.X25519], [ecdh.P256],
+// [ecdh.P384], or [ecdh.P521]).
+//
+// This function is meant for applications that already have an instantiated
+// crypto/ecdh private key, or another implementation of a [ecdh.KeyExchanger]
+// (e.g. a hardware key). Otherwise, applications should use the
+// [KEM.NewPrivateKey] method of [DHKEM].
+func NewDHKEMPrivateKey(priv ecdh.KeyExchanger) (PrivateKey, error) {
+ kem, ok := DHKEM(priv.Curve()).(*dhKEM)
+ if !ok {
+ return nil, errors.New("unsupported curve")
+ }
+ return &dhKEMPrivateKey{
+ kem: kem,
+ priv: priv,
+ }, nil
+}
+
+func (kem *dhKEM) GenerateKey() (PrivateKey, error) {
+ priv, err := kem.curve.GenerateKey(rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ return NewDHKEMPrivateKey(priv)
+}
+
+func (kem *dhKEM) NewPrivateKey(ikm []byte) (PrivateKey, error) {
+ priv, err := kem.curve.NewPrivateKey(ikm)
+ if err != nil {
+ return nil, err
+ }
+ return NewDHKEMPrivateKey(priv)
+}
+
+func (kem *dhKEM) DeriveKeyPair(ikm []byte) (PrivateKey, error) {
+ // DeriveKeyPair from RFC 9180 Section 7.1.3.
+ suiteID := byteorder.BEAppendUint16([]byte("KEM"), kem.id)
+ prk, err := kem.kdf.labeledExtract(suiteID, nil, "dkp_prk", ikm)
+ if err != nil {
+ return nil, err
+ }
+ if kem == dhKEMX25519 {
+ s, err := kem.kdf.labeledExpand(suiteID, prk, "sk", nil, kem.Nsk)
+ if err != nil {
+ return nil, err
+ }
+ return kem.NewPrivateKey(s)
+ }
+ var counter uint8
+ for counter < 4 {
+ s, err := kem.kdf.labeledExpand(suiteID, prk, "candidate", []byte{counter}, kem.Nsk)
+ if err != nil {
+ return nil, err
+ }
+ if kem == dhKEMP521 {
+ s[0] &= 0x01
+ }
+ r, err := kem.NewPrivateKey(s)
+ if err != nil {
+ counter++
+ continue
+ }
+ return r, nil
+ }
+ panic("chance of four rejections is < 2^-128")
+}
+
+func (k *dhKEMPrivateKey) KEM() KEM {
+ return k.kem
+}
+
+func (k *dhKEMPrivateKey) Bytes() ([]byte, error) {
+ // Bizarrely, RFC 9180, Section 7.1.2 says SerializePrivateKey MUST clamp
+ // the output, which I thought we all agreed to instead do as part of the DH
+ // function, letting private keys be random bytes.
+ //
+ // At the same time, it says DeserializePrivateKey MUST also clamp, implying
+ // that the input doesn't have to be clamped, so Bytes by spec doesn't
+ // necessarily match the NewPrivateKey input.
+ //
+ // I'm sure this will not lead to any unexpected behavior or interop issue.
+ priv, ok := k.priv.(*ecdh.PrivateKey)
+ if !ok {
+ return nil, errors.New("ecdh: private key does not support Bytes")
+ }
+ if k.kem == dhKEMX25519 {
+ b := priv.Bytes()
+ b[0] &= 248
+ b[31] &= 127
+ b[31] |= 64
+ return b, nil
+ }
+ return priv.Bytes(), nil
+}
+
+func (k *dhKEMPrivateKey) PublicKey() PublicKey {
+ return &dhKEMPublicKey{
+ kem: k.kem,
+ pub: k.priv.PublicKey(),
+ }
+}
+
+func (k *dhKEMPrivateKey) decap(encPubEph []byte) ([]byte, error) {
+ pubEph, err := k.priv.Curve().NewPublicKey(encPubEph)
+ if err != nil {
+ return nil, err
+ }
+ dhVal, err := k.priv.ECDH(pubEph)
+ if err != nil {
+ return nil, err
+ }
+ kemContext := append(encPubEph, k.priv.PublicKey().Bytes()...)
+ return k.kem.extractAndExpand(dhVal, kemContext)
+}
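The exported surface of kem.go above is small enough that a key round trip can be sketched from it directly. The snippet below is a minimal usage sketch, not part of the vendored package: it assumes only the identifiers visible in this file (NewKEM, GenerateKey, NewPublicKey, and the Bytes accessors) plus the 0x0020 registry ID for DHKEM(X25519, HKDF-SHA256) listed in NewKEM.

```
package main

import (
	"fmt"
	"log"

	"filippo.io/hpke"
)

func main() {
	// Pick DHKEM(X25519, HKDF-SHA256) by its registry ID; hpke.DHKEM with an
	// X25519 curve would be the non-agile equivalent.
	kem, err := hpke.NewKEM(0x0020)
	if err != nil {
		log.Fatal(err)
	}

	// Generate a recipient key pair and serialize the encapsulation key.
	priv, err := kem.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	encPub := priv.PublicKey().Bytes()

	// A sender would rebuild the PublicKey from the serialized bytes.
	pub, err := kem.NewPublicKey(encPub)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("KEM 0x%04x, %d-byte public key\n", pub.KEM().ID(), len(pub.Bytes()))
}
```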
diff --git a/vendor/filippo.io/hpke/pq.go b/vendor/filippo.io/hpke/pq.go
new file mode 100644
index 00000000..760d182d
--- /dev/null
+++ b/vendor/filippo.io/hpke/pq.go
@@ -0,0 +1,535 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpke
+
+import (
+ "bytes"
+ "crypto/mlkem"
+ "crypto/rand"
+ "crypto/sha3"
+ "errors"
+
+ "filippo.io/hpke/crypto"
+ "filippo.io/hpke/crypto/ecdh"
+ "filippo.io/hpke/internal/byteorder"
+)
+
+var mlkem768X25519 = &hybridKEM{
+ id: 0x647a,
+ label: /**/ `\./` +
+ /* */ `/^\`,
+ curve: ecdh.X25519(),
+
+ curveSeedSize: 32,
+ curvePointSize: 32,
+ pqEncapsKeySize: mlkem.EncapsulationKeySize768,
+ pqCiphertextSize: mlkem.CiphertextSize768,
+
+ pqNewPublicKey: func(data []byte) (crypto.Encapsulator, error) {
+ return mlkem.NewEncapsulationKey768(data)
+ },
+ pqNewPrivateKey: func(data []byte) (crypto.Decapsulator, error) {
+ return wrapDecapsulator(mlkem.NewDecapsulationKey768(data))
+ },
+}
+
+// MLKEM768X25519 returns a KEM implementing MLKEM768-X25519 (a.k.a. X-Wing)
+// from draft-ietf-hpke-pq.
+func MLKEM768X25519() KEM {
+ return mlkem768X25519
+}
+
+var mlkem768P256 = &hybridKEM{
+ id: 0x0050,
+ label: "MLKEM768-P256",
+ curve: ecdh.P256(),
+
+ curveSeedSize: 32,
+ curvePointSize: 65,
+ pqEncapsKeySize: mlkem.EncapsulationKeySize768,
+ pqCiphertextSize: mlkem.CiphertextSize768,
+
+ pqNewPublicKey: func(data []byte) (crypto.Encapsulator, error) {
+ return mlkem.NewEncapsulationKey768(data)
+ },
+ pqNewPrivateKey: func(data []byte) (crypto.Decapsulator, error) {
+ return wrapDecapsulator(mlkem.NewDecapsulationKey768(data))
+ },
+}
+
+// MLKEM768P256 returns a KEM implementing MLKEM768-P256 from draft-ietf-hpke-pq.
+func MLKEM768P256() KEM {
+ return mlkem768P256
+}
+
+var mlkem1024P384 = &hybridKEM{
+ id: 0x0051,
+ label: "MLKEM1024-P384",
+ curve: ecdh.P384(),
+
+ curveSeedSize: 48,
+ curvePointSize: 97,
+ pqEncapsKeySize: mlkem.EncapsulationKeySize1024,
+ pqCiphertextSize: mlkem.CiphertextSize1024,
+
+ pqNewPublicKey: func(data []byte) (crypto.Encapsulator, error) {
+ return mlkem.NewEncapsulationKey1024(data)
+ },
+ pqNewPrivateKey: func(data []byte) (crypto.Decapsulator, error) {
+ return wrapDecapsulator(mlkem.NewDecapsulationKey1024(data))
+ },
+}
+
+// MLKEM1024P384 returns a KEM implementing MLKEM1024-P384 from draft-ietf-hpke-pq.
+func MLKEM1024P384() KEM {
+ return mlkem1024P384
+}
+
+type hybridKEM struct {
+ id uint16
+ label string
+ curve ecdh.Curve
+
+ curveSeedSize int
+ curvePointSize int
+ pqEncapsKeySize int
+ pqCiphertextSize int
+
+ pqNewPublicKey func(data []byte) (crypto.Encapsulator, error)
+ pqNewPrivateKey func(data []byte) (crypto.Decapsulator, error)
+}
+
+func (kem *hybridKEM) ID() uint16 {
+ return kem.id
+}
+
+func (kem *hybridKEM) encSize() int {
+ return kem.pqCiphertextSize + kem.curvePointSize
+}
+
+func (kem *hybridKEM) sharedSecret(ssPQ, ssT, ctT, ekT []byte) []byte {
+ h := sha3.New256()
+ h.Write(ssPQ)
+ h.Write(ssT)
+ h.Write(ctT)
+ h.Write(ekT)
+ h.Write([]byte(kem.label))
+ return h.Sum(nil)
+}
+
+type hybridPublicKey struct {
+ kem *hybridKEM
+ t *ecdh.PublicKey
+ pq crypto.Encapsulator
+}
+
+// NewHybridPublicKey returns a PublicKey implementing one of
+//
+// - MLKEM768-X25519 (a.k.a. X-Wing)
+// - MLKEM768-P256
+// - MLKEM1024-P384
+//
+// from draft-ietf-hpke-pq, depending on the underlying curve of t
+// ([ecdh.X25519], [ecdh.P256], or [ecdh.P384]) and the type of pq (either
+// *[mlkem.EncapsulationKey768] or *[mlkem.EncapsulationKey1024]).
+//
+// This function is meant for applications that already have instantiated
+// crypto/ecdh and crypto/mlkem public keys. Otherwise, applications should use
+// the [KEM.NewPublicKey] method of e.g. [MLKEM768X25519].
+func NewHybridPublicKey(pq crypto.Encapsulator, t *ecdh.PublicKey) (PublicKey, error) {
+ switch t.Curve() {
+ case ecdh.X25519():
+ if _, ok := pq.(*mlkem.EncapsulationKey768); !ok {
+ return nil, errors.New("invalid PQ KEM for X25519 hybrid")
+ }
+ return &hybridPublicKey{mlkem768X25519, t, pq}, nil
+ case ecdh.P256():
+ if _, ok := pq.(*mlkem.EncapsulationKey768); !ok {
+ return nil, errors.New("invalid PQ KEM for P-256 hybrid")
+ }
+ return &hybridPublicKey{mlkem768P256, t, pq}, nil
+ case ecdh.P384():
+ if _, ok := pq.(*mlkem.EncapsulationKey1024); !ok {
+ return nil, errors.New("invalid PQ KEM for P-384 hybrid")
+ }
+ return &hybridPublicKey{mlkem1024P384, t, pq}, nil
+ default:
+ return nil, errors.New("unsupported curve")
+ }
+}
+
+func (kem *hybridKEM) NewPublicKey(data []byte) (PublicKey, error) {
+ if len(data) != kem.pqEncapsKeySize+kem.curvePointSize {
+ return nil, errors.New("invalid public key size")
+ }
+ pq, err := kem.pqNewPublicKey(data[:kem.pqEncapsKeySize])
+ if err != nil {
+ return nil, err
+ }
+ k, err := kem.curve.NewPublicKey(data[kem.pqEncapsKeySize:])
+ if err != nil {
+ return nil, err
+ }
+ return NewHybridPublicKey(pq, k)
+}
+
+func (pk *hybridPublicKey) KEM() KEM {
+ return pk.kem
+}
+
+func (pk *hybridPublicKey) Bytes() []byte {
+ return append(pk.pq.Bytes(), pk.t.Bytes()...)
+}
+
+var testingOnlyEncapsulate func() (ss, ct []byte)
+
+func (pk *hybridPublicKey) encap() (sharedSecret []byte, encapPub []byte, err error) {
+ skE, err := pk.t.Curve().GenerateKey(rand.Reader)
+ if err != nil {
+ return nil, nil, err
+ }
+ if testingOnlyGenerateKey != nil {
+ skE = testingOnlyGenerateKey()
+ }
+ ssT, err := skE.ECDH(pk.t)
+ if err != nil {
+ return nil, nil, err
+ }
+ ctT := skE.PublicKey().Bytes()
+
+ ssPQ, ctPQ := pk.pq.Encapsulate()
+ if testingOnlyEncapsulate != nil {
+ ssPQ, ctPQ = testingOnlyEncapsulate()
+ }
+
+ ss := pk.kem.sharedSecret(ssPQ, ssT, ctT, pk.t.Bytes())
+ ct := append(ctPQ, ctT...)
+ return ss, ct, nil
+}
+
+type hybridPrivateKey struct {
+ kem *hybridKEM
+ seed []byte // can be nil
+ t ecdh.KeyExchanger
+ pq crypto.Decapsulator
+}
+
+// NewHybridPrivateKey returns a PrivateKey implementing
+//
+// - MLKEM768-X25519 (a.k.a. X-Wing)
+// - MLKEM768-P256
+// - MLKEM1024-P384
+//
+// from draft-ietf-hpke-pq, depending on the underlying curve of t
+// ([ecdh.X25519], [ecdh.P256], or [ecdh.P384]) and the type of pq.Encapsulator()
+// (either *[mlkem.EncapsulationKey768] or *[mlkem.EncapsulationKey1024]).
+//
+// This function is meant for applications that already have instantiated
+// crypto/ecdh and crypto/mlkem private keys, or another implementation of a
+// [ecdh.KeyExchanger] and [crypto.Decapsulator] (e.g. a hardware key).
+// Otherwise, applications should use the [KEM.NewPrivateKey] method of e.g.
+// [MLKEM768X25519].
+func NewHybridPrivateKey(pq crypto.Decapsulator, t ecdh.KeyExchanger) (PrivateKey, error) {
+ return newHybridPrivateKey(pq, t, nil)
+}
+
+func (kem *hybridKEM) GenerateKey() (PrivateKey, error) {
+ seed := make([]byte, 32)
+ rand.Read(seed)
+ return kem.NewPrivateKey(seed)
+}
+
+func (kem *hybridKEM) NewPrivateKey(priv []byte) (PrivateKey, error) {
+ if len(priv) != 32 {
+ return nil, errors.New("hpke: invalid hybrid KEM secret length")
+ }
+
+ s := sha3.NewSHAKE256()
+ s.Write(priv)
+
+ seedPQ := make([]byte, mlkem.SeedSize)
+ s.Read(seedPQ)
+ pq, err := kem.pqNewPrivateKey(seedPQ)
+ if err != nil {
+ return nil, err
+ }
+
+ seedT := make([]byte, kem.curveSeedSize)
+ for {
+ s.Read(seedT)
+ k, err := kem.curve.NewPrivateKey(seedT)
+ if err != nil {
+ continue
+ }
+ return newHybridPrivateKey(pq, k, priv)
+ }
+}
+
+func newHybridPrivateKey(pq crypto.Decapsulator, t ecdh.KeyExchanger, seed []byte) (PrivateKey, error) {
+ switch t.Curve() {
+ case ecdh.X25519():
+ if _, ok := pq.Encapsulator().(*mlkem.EncapsulationKey768); !ok {
+ return nil, errors.New("invalid PQ KEM for X25519 hybrid")
+ }
+ return &hybridPrivateKey{mlkem768X25519, bytes.Clone(seed), t, pq}, nil
+ case ecdh.P256():
+ if _, ok := pq.Encapsulator().(*mlkem.EncapsulationKey768); !ok {
+ return nil, errors.New("invalid PQ KEM for P-256 hybrid")
+ }
+ return &hybridPrivateKey{mlkem768P256, bytes.Clone(seed), t, pq}, nil
+ case ecdh.P384():
+ if _, ok := pq.Encapsulator().(*mlkem.EncapsulationKey1024); !ok {
+ return nil, errors.New("invalid PQ KEM for P-384 hybrid")
+ }
+ return &hybridPrivateKey{mlkem1024P384, bytes.Clone(seed), t, pq}, nil
+ default:
+ return nil, errors.New("unsupported curve")
+ }
+}
+
+func (kem *hybridKEM) DeriveKeyPair(ikm []byte) (PrivateKey, error) {
+ suiteID := byteorder.BEAppendUint16([]byte("KEM"), kem.id)
+ dk, err := SHAKE256().labeledDerive(suiteID, ikm, "DeriveKeyPair", nil, 32)
+ if err != nil {
+ return nil, err
+ }
+ return kem.NewPrivateKey(dk)
+}
+
+func (k *hybridPrivateKey) KEM() KEM {
+ return k.kem
+}
+
+func (k *hybridPrivateKey) Bytes() ([]byte, error) {
+ if k.seed == nil {
+ return nil, errors.New("private key seed not available")
+ }
+ return k.seed, nil
+}
+
+func (k *hybridPrivateKey) PublicKey() PublicKey {
+ return &hybridPublicKey{
+ kem: k.kem,
+ t: k.t.PublicKey(),
+ pq: k.pq.Encapsulator(),
+ }
+}
+
+func (k *hybridPrivateKey) decap(enc []byte) ([]byte, error) {
+ if len(enc) != k.kem.pqCiphertextSize+k.kem.curvePointSize {
+ return nil, errors.New("invalid encapsulated key size")
+ }
+ ctPQ, ctT := enc[:k.kem.pqCiphertextSize], enc[k.kem.pqCiphertextSize:]
+ ssPQ, err := k.pq.Decapsulate(ctPQ)
+ if err != nil {
+ return nil, err
+ }
+ pub, err := k.t.Curve().NewPublicKey(ctT)
+ if err != nil {
+ return nil, err
+ }
+ ssT, err := k.t.ECDH(pub)
+ if err != nil {
+ return nil, err
+ }
+ ss := k.kem.sharedSecret(ssPQ, ssT, ctT, k.t.PublicKey().Bytes())
+ return ss, nil
+}
+
+var mlkem768 = &mlkemKEM{
+ id: 0x0041,
+ ciphertextSize: mlkem.CiphertextSize768,
+ newPublicKey: func(data []byte) (crypto.Encapsulator, error) {
+ return mlkem.NewEncapsulationKey768(data)
+ },
+ newPrivateKey: func(data []byte) (crypto.Decapsulator, error) {
+ return wrapDecapsulator(mlkem.NewDecapsulationKey768(data))
+ },
+ generateKey: func() (crypto.Decapsulator, error) {
+ return wrapDecapsulator(mlkem.GenerateKey768())
+ },
+}
+
+// MLKEM768 returns a KEM implementing ML-KEM-768 from draft-ietf-hpke-pq.
+func MLKEM768() KEM {
+ return mlkem768
+}
+
+var mlkem1024 = &mlkemKEM{
+ id: 0x0042,
+ ciphertextSize: mlkem.CiphertextSize1024,
+ newPublicKey: func(data []byte) (crypto.Encapsulator, error) {
+ return mlkem.NewEncapsulationKey1024(data)
+ },
+ newPrivateKey: func(data []byte) (crypto.Decapsulator, error) {
+ return wrapDecapsulator(mlkem.NewDecapsulationKey1024(data))
+ },
+ generateKey: func() (crypto.Decapsulator, error) {
+ return wrapDecapsulator(mlkem.GenerateKey1024())
+ },
+}
+
+// MLKEM1024 returns a KEM implementing ML-KEM-1024 from draft-ietf-hpke-pq.
+func MLKEM1024() KEM {
+ return mlkem1024
+}
+
+type mlkemKEM struct {
+ id uint16
+ ciphertextSize int
+ newPublicKey func(data []byte) (crypto.Encapsulator, error)
+ newPrivateKey func(data []byte) (crypto.Decapsulator, error)
+ generateKey func() (crypto.Decapsulator, error)
+}
+
+func (kem *mlkemKEM) ID() uint16 {
+ return kem.id
+}
+
+func (kem *mlkemKEM) encSize() int {
+ return kem.ciphertextSize
+}
+
+type mlkemPublicKey struct {
+ kem *mlkemKEM
+ pq crypto.Encapsulator
+}
+
+// NewMLKEMPublicKey returns a PublicKey implementing
+//
+// - ML-KEM-768
+// - ML-KEM-1024
+//
+// from draft-ietf-hpke-pq, depending on the type of pub
+// (*[mlkem.EncapsulationKey768] or *[mlkem.EncapsulationKey1024]).
+//
+// This function is meant for applications that already have an instantiated
+// crypto/mlkem public key. Otherwise, applications should use the
+// [KEM.NewPublicKey] method of e.g. [MLKEM768].
+func NewMLKEMPublicKey(pub crypto.Encapsulator) (PublicKey, error) {
+ switch pub.(type) {
+ case *mlkem.EncapsulationKey768:
+ return &mlkemPublicKey{mlkem768, pub}, nil
+ case *mlkem.EncapsulationKey1024:
+ return &mlkemPublicKey{mlkem1024, pub}, nil
+ default:
+ return nil, errors.New("unsupported public key type")
+ }
+}
+
+func (kem *mlkemKEM) NewPublicKey(data []byte) (PublicKey, error) {
+ pq, err := kem.newPublicKey(data)
+ if err != nil {
+ return nil, err
+ }
+ return NewMLKEMPublicKey(pq)
+}
+
+func (pk *mlkemPublicKey) KEM() KEM {
+ return pk.kem
+}
+
+func (pk *mlkemPublicKey) Bytes() []byte {
+ return pk.pq.Bytes()
+}
+
+func (pk *mlkemPublicKey) encap() (sharedSecret []byte, encapPub []byte, err error) {
+ ss, ct := pk.pq.Encapsulate()
+ if testingOnlyEncapsulate != nil {
+ ss, ct = testingOnlyEncapsulate()
+ }
+ return ss, ct, nil
+}
+
+type mlkemPrivateKey struct {
+ kem *mlkemKEM
+ pq crypto.Decapsulator
+}
+
+// NewMLKEMPrivateKey returns a PrivateKey implementing
+//
+// - ML-KEM-768
+// - ML-KEM-1024
+//
+// from draft-ietf-hpke-pq, depending on the type of priv.Encapsulator()
+// (either *[mlkem.EncapsulationKey768] or *[mlkem.EncapsulationKey1024]).
+//
+// This function is meant for applications that already have an instantiated
+// crypto/mlkem private key. Otherwise, applications should use the
+// [KEM.NewPrivateKey] method of e.g. [MLKEM768].
+func NewMLKEMPrivateKey(priv crypto.Decapsulator) (PrivateKey, error) {
+ switch priv.Encapsulator().(type) {
+ case *mlkem.EncapsulationKey768:
+ return &mlkemPrivateKey{mlkem768, priv}, nil
+ case *mlkem.EncapsulationKey1024:
+ return &mlkemPrivateKey{mlkem1024, priv}, nil
+ default:
+ return nil, errors.New("unsupported public key type")
+ }
+}
+
+func (kem *mlkemKEM) GenerateKey() (PrivateKey, error) {
+ pq, err := kem.generateKey()
+ if err != nil {
+ return nil, err
+ }
+ return NewMLKEMPrivateKey(pq)
+}
+
+func (kem *mlkemKEM) NewPrivateKey(priv []byte) (PrivateKey, error) {
+ pq, err := kem.newPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ return NewMLKEMPrivateKey(pq)
+}
+
+func (kem *mlkemKEM) DeriveKeyPair(ikm []byte) (PrivateKey, error) {
+ suiteID := byteorder.BEAppendUint16([]byte("KEM"), kem.id)
+ dk, err := SHAKE256().labeledDerive(suiteID, ikm, "DeriveKeyPair", nil, 64)
+ if err != nil {
+ return nil, err
+ }
+ return kem.NewPrivateKey(dk)
+}
+
+func (k *mlkemPrivateKey) KEM() KEM {
+ return k.kem
+}
+
+func (k *mlkemPrivateKey) Bytes() ([]byte, error) {
+ pq, ok := k.pq.(interface {
+ Bytes() []byte
+ })
+ if !ok {
+ return nil, errors.New("private key seed not available")
+ }
+ return pq.Bytes(), nil
+}
+
+func (k *mlkemPrivateKey) PublicKey() PublicKey {
+ return &mlkemPublicKey{
+ kem: k.kem,
+ pq: k.pq.Encapsulator(),
+ }
+}
+
+func (k *mlkemPrivateKey) decap(enc []byte) ([]byte, error) {
+ return k.pq.Decapsulate(enc)
+}
+
+func wrapDecapsulator(dk any, err error) (crypto.Decapsulator, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch key := dk.(type) {
+ case *mlkem.DecapsulationKey768:
+ return crypto.DecapsulatorFromDecapsulationKey768(key), nil
+ case *mlkem.DecapsulationKey1024:
+ return crypto.DecapsulatorFromDecapsulationKey1024(key), nil
+ default:
+ return nil, errors.New("hpke: internal error: unknown decapsulation key type")
+ }
+}
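For the hybrid KEMs in pq.go, NewPrivateKey expands a 32-byte seed deterministically and Bytes returns that same seed, so a key pair can be persisted as its seed. The following is an illustrative sketch only, relying solely on the exported API added in kem.go and pq.go.

```
package main

import (
	"bytes"
	"fmt"
	"log"

	"filippo.io/hpke"
)

func main() {
	// MLKEM768-X25519 (X-Wing): GenerateKey draws a 32-byte seed and
	// NewPrivateKey re-expands it into the same ML-KEM and X25519 keys.
	kem := hpke.MLKEM768X25519()

	priv, err := kem.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	seed, err := priv.Bytes()
	if err != nil {
		log.Fatal(err)
	}

	restored, err := kem.NewPrivateKey(seed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(
		priv.PublicKey().Bytes(),
		restored.PublicKey().Bytes(),
	)) // true: the seed fully determines the key pair
}
```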
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
new file mode 100644
index 00000000..24b53065
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
new file mode 100644
index 00000000..33c88305
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -0,0 +1,74 @@
+# xxhash
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
+
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
+
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
+
+```
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh
new file mode 100644
index 00000000..94b9c443
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
new file mode 100644
index 00000000..78bddf1c
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -0,0 +1,243 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array for the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
+
+// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest with a zero seed.
+func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
+ var d Digest
+ d.ResetWithSeed(seed)
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
+func (d *Digest) Reset() {
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(memleft, b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ c := copy(memleft, b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[c:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
new file mode 100644
index 00000000..3e8b1325
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -0,0 +1,209 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ // Load fixed primes.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
+
+ // Load slice.
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, end
+
+ // Check whether we have at least one block.
+ CMPQ n, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·primes+32(SB), h
+
+afterBlocks:
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
+ JGE finalize
+
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
+
+ CMPQ p, end
+ JL loop1
+
+finalize:
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+
+ // Load slice.
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
+
+ // Load vN from d.
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+ blockLoop()
+
+ // Copy vN back to d.
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
new file mode 100644
index 00000000..7e3145a2
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
new file mode 100644
index 00000000..78f95f25
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -0,0 +1,15 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
new file mode 100644
index 00000000..118e49e8
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -0,0 +1,76 @@
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := primes[0] + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -primes[0]
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
new file mode 100644
index 00000000..05f5e7df
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -0,0 +1,16 @@
+//go:build appengine
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
new file mode 100644
index 00000000..cf9d42ae
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -0,0 +1,58 @@
+//go:build !appengine
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+ "unsafe"
+)
+
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
+//
+// var b []byte
+// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+// bh.Len = len(s)
+// bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
+
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+ b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
+ return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+ // d.Write always returns len(s), nil.
+ // Ignoring the return output and returning these fixed values buys a
+ // savings of 6 in the inliner's cost model.
+ return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+ s string
+ cap int
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
index a65d88eb..04b4bebf 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -27,6 +27,7 @@ go_library(
"//internal/httprule",
"//utilities",
"@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_grpc//:grpc",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog",
"@org_golang_google_grpc//health/grpc_health_v1",
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
index 2f2b3424..00b2228a 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -201,13 +201,13 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM
if timeout != 0 {
ctx, _ = context.WithTimeout(ctx, timeout)
}
- if len(pairs) == 0 {
- return ctx, nil, nil
- }
md := metadata.Pairs(pairs...)
for _, mda := range mux.metadataAnnotators {
md = metadata.Join(md, mda(ctx, req))
}
+ if len(md) == 0 {
+ return ctx, nil, nil
+ }
return ctx, md, nil
}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
index 8376d1e0..3d070630 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -66,7 +66,7 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
var (
// protoMessageType is stored to prevent constant lookup of the same type at runtime.
- protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+ protoMessageType = reflect.TypeFor[proto.Message]()
)
// marshalNonProto marshals a non-message field of a protobuf message.
@@ -325,9 +325,9 @@ type protoEnum interface {
EnumDescriptor() ([]byte, []int)
}
-var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
+var typeProtoEnum = reflect.TypeFor[protoEnum]()
-var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeProtoMessage = reflect.TypeFor[proto.Message]()
// Delimiter for newline encoded JSON streams.
func (j *JSONPb) Delimiter() []byte {
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
index 19255ec4..3eb16167 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -10,6 +10,7 @@ import (
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+ "google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/health/grpc_health_v1"
@@ -281,12 +282,19 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin
http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
) {
_, outboundMarshaler := MarshalerForRequest(s, r)
+ annotatedContext, err := AnnotateContext(r.Context(), s, r, grpc_health_v1.Health_Check_FullMethodName, WithHTTPPathPattern(endpointPath))
+ if err != nil {
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
- resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
+ var md ServerMetadata
+ resp, err := healthCheckClient.Check(annotatedContext, &grpc_health_v1.HealthCheckRequest{
Service: r.URL.Query().Get("service"),
- })
+ }, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
+ annotatedContext = NewServerMetadataContext(annotatedContext, md)
if err != nil {
- s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err)
return
}
@@ -300,7 +308,7 @@ func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpoin
err = status.Error(codes.NotFound, resp.String())
}
- s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ s.errorHandler(annotatedContext, s, outboundMarshaler, w, r, err)
return
}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
index e854d7e8..2950fdb4 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
}
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-func unmarshalJSON(dst []byte, src []byte) error {
+func unmarshalJSON(dst, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1]
}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
index 29e629d6..5bb3b16c 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error {
// strings or integers.
type protoUint64 uint64
-// Int64 returns the protoUint64 as a uint64.
+// Uint64 returns the protoUint64 as a uint64.
func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
// UnmarshalJSON decodes both strings and integers.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
index a13a6b73..67f80b6a 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"time"
)
@@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) {
}{
Alias: Alias(s),
ParentSpanID: parentSpanId,
- StartTime: uint64(startT),
- EndTime: uint64(endT),
+ StartTime: uint64(startT), // nolint:gosec // >0 checked above.
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above.
})
}
@@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error {
case "startTimeUnixNano", "start_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- s.StartTime = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ s.StartTime = time.Unix(0, v)
case "endTimeUnixNano", "end_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- s.EndTime = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ s.EndTime = time.Unix(0, v)
case "attributes":
err = decoder.Decode(&s.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
@@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error {
type SpanFlags int32
const (
+ // SpanFlagsTraceFlagsMask is a mask for trace-flags.
+ //
// Bits 0-7 are used for trace flags.
SpanFlagsTraceFlagsMask SpanFlags = 255
- // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
- // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
- // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
SpanFlagsContextHasIsRemoteMask SpanFlags = 256
- // SpanFlagsContextHasIsRemoteMask indicates the Span is remote.
+ // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
+ // remote.
SpanFlagsContextIsRemoteMask SpanFlags = 512
)
@@ -263,26 +273,30 @@ const (
type SpanKind int32
const (
- // Indicates that the span represents an internal operation within an application,
- // as opposed to an operation happening at the boundaries. Default value.
+ // SpanKindInternal indicates that the span represents an internal
+ // operation within an application, as opposed to an operation happening at
+ // the boundaries.
SpanKindInternal SpanKind = 1
- // Indicates that the span covers server-side handling of an RPC or other
- // remote network request.
+ // SpanKindServer indicates that the span covers server-side handling of an
+ // RPC or other remote network request.
SpanKindServer SpanKind = 2
- // Indicates that the span describes a request to some remote service.
+ // SpanKindClient indicates that the span describes a request to some
+ // remote service.
SpanKindClient SpanKind = 3
- // Indicates that the span describes a producer sending a message to a broker.
- // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
- // between producer and consumer spans. A PRODUCER span ends when the message was accepted
- // by the broker while the logical processing of the message might span a much longer time.
+ // SpanKindProducer indicates that the span describes a producer sending a
+ // message to a broker. Unlike SpanKindClient and SpanKindServer, there is
+ // often no direct critical path latency relationship between producer and
+ // consumer spans. A SpanKindProducer span ends when the message was
+ // accepted by the broker while the logical processing of the message might
+ // span a much longer time.
SpanKindProducer SpanKind = 4
- // Indicates that the span describes consumer receiving a message from a broker.
- // Like the PRODUCER kind, there is often no direct critical path latency relationship
- // between producer and consumer spans.
+ // SpanKindConsumer indicates that the span describes a consumer receiving
+ // a message from a broker. Like SpanKindProducer, there is often no direct
+ // critical path latency relationship between producer and consumer spans.
SpanKindConsumer SpanKind = 5
)
-// Event is a time-stamped annotation of the span, consisting of user-supplied
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
// text description and key-value pairs.
type SpanEvent struct {
// time_unix_nano is the time the event occurred.
@@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) {
Time uint64 `json:"timeUnixNano,omitempty"`
}{
Alias: Alias(e),
- Time: uint64(t),
+ Time: uint64(t), //nolint:gosec // >0 checked above
})
}
@@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
case "timeUnixNano", "time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- se.Time = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ se.Time = time.Unix(0, v)
case "name":
err = decoder.Decode(&se.Name)
case "attributes":
@@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
return nil
}
-// A pointer from the current span to another span in the same trace or in a
-// different trace. For example, this can be used in batching operations,
-// where a single batch handler processes multiple requests from different
-// traces or when the handler receives a request from a different project.
+// SpanLink is a reference from the current span to another span in the same
+// trace or in a different trace. For example, this can be used in batching
+// operations, where a single batch handler processes multiple requests from
+// different traces or when the handler receives a request from a different
+// project.
type SpanLink struct {
// A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
index 1217776e..a2802764 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
@@ -3,17 +3,19 @@
package telemetry
+// StatusCode is the status of a Span.
+//
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32
const (
- // The default status.
+ // StatusCodeUnset is the default status.
StatusCodeUnset StatusCode = 0
- // The Span has been validated by an Application developer or Operator to
- // have completed successfully.
+ // StatusCodeOK is used when the Span has been validated by an Application
+ // developer or Operator to have completed successfully.
StatusCodeOK StatusCode = 1
- // The Span contains an error.
+ // StatusCodeError is used when the Span contains an error.
StatusCodeError StatusCode = 2
)
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
index 69a348f0..44197b80 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
@@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error {
return nil
}
-// A collection of ScopeSpans from a Resource.
+// ResourceSpans is a collection of ScopeSpans from a Resource.
type ResourceSpans struct {
// The resource for the spans in this message.
// If this field is not set then no resource info is known.
@@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
return nil
}
-// A collection of Spans produced by an InstrumentationScope.
+// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
type ScopeSpans struct {
// The instrumentation scope information for the spans in this message.
// Semantically when InstrumentationScope isn't set, it is equivalent with
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
index 0dd01b06..022768bb 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -1,8 +1,6 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-//go:generate stringer -type=ValueKind -trimprefix=ValueKind
-
package telemetry
import (
@@ -23,7 +21,7 @@ import (
// A zero value is valid and represents an empty value.
type Value struct {
// Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
+ noCmp [0]func() //nolint:unused // This is indeed used.
// num holds the value for Int64, Float64, and Bool. It holds the length
// for String, Bytes, Slice, Map.
@@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) }
// Int64Value returns a [Value] for an int64.
func Int64Value(v int64) Value {
- return Value{num: uint64(v), any: ValueKindInt64}
+ return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv.
}
// Float64Value returns a [Value] for a float64.
@@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 {
// this will return garbage.
func (v Value) asInt64() int64 {
// Assumes v.num was a valid int64 (overflow not checked).
- return int64(v.num) // nolint: gosec
+ return int64(v.num) //nolint:gosec // Bounded.
}
// AsBool returns the value held by v as a bool.
@@ -309,13 +307,13 @@ func (v Value) String() string {
return v.asString()
case ValueKindInt64:
// Assumes v.num was a valid int64 (overflow not checked).
- return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded.
case ValueKindFloat64:
return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
case ValueKindBool:
return strconv.FormatBool(v.asBool())
case ValueKindBytes:
- return fmt.Sprint(v.asBytes())
+ return string(v.asBytes())
case ValueKindMap:
return fmt.Sprint(v.asMap())
case ValueKindSlice:
@@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) {
case ValueKindInt64:
return json.Marshal(struct {
Value string `json:"intValue"`
- }{strconv.FormatInt(int64(v.num), 10)})
+ }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv.
case ValueKindFloat64:
return json.Marshal(struct {
Value float64 `json:"doubleValue"`
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
index 6ebea12a..815d271f 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/span.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -6,6 +6,7 @@ package sdk
import (
"encoding/json"
"fmt"
+ "math"
"reflect"
"runtime"
"strings"
@@ -16,7 +17,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
@@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
limit := maxSpan.Attrs
if limit == 0 {
// No attributes allowed.
- s.span.DroppedAttrs += uint32(len(attrs))
+ n := int64(len(attrs))
+ if n > 0 {
+ s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked.
+ min(n, math.MaxUint32),
+ )
+ }
return
}
@@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
// number of dropped attributes is also returned.
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ n := len(attrs)
if limit == 0 {
- return nil, uint32(len(attrs))
+ var out uint32
+ if n > 0 {
+ out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked.
+ }
+ return nil, out
}
if limit < 0 {
@@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u
return convAttrs(attrs), 0
}
- limit = min(len(attrs), limit)
- return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
+ if n < 0 {
+ n = 0
+ }
+
+ limit = min(n, limit)
+ return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked.
}
func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
index cbcfabde..e09acf02 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
@@ -5,6 +5,7 @@ package sdk
import (
"context"
+ "math"
"time"
"go.opentelemetry.io/otel/trace"
@@ -21,15 +22,20 @@ type tracer struct {
var _ trace.Tracer = tracer{}
-func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
- var psc trace.SpanContext
+func (t tracer) Start(
+ ctx context.Context,
+ name string,
+ opts ...trace.SpanStartOption,
+) (context.Context, trace.Span) {
+ var psc, sc trace.SpanContext
sampled := true
span := new(span)
// Ask eBPF for sampling decision and span context info.
- t.start(ctx, span, &psc, &sampled, &span.spanContext)
+ t.start(ctx, span, &psc, &sampled, &sc)
span.sampled.Store(sampled)
+ span.spanContext = sc
ctx = trace.ContextWithSpan(ctx, span)
@@ -58,7 +64,13 @@ func (t *tracer) start(
// start is used for testing.
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
-func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
+var intToUint32Bound = min(math.MaxInt, math.MaxUint32)
+
+func (t tracer) traces(
+ name string,
+ cfg trace.SpanConfig,
+ sc, psc trace.SpanContext,
+) (*telemetry.Traces, *telemetry.Span) {
span := &telemetry.Span{
TraceID: telemetry.TraceID(sc.TraceID()),
SpanID: telemetry.SpanID(sc.SpanID()),
@@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont
links := cfg.Links()
if limit := maxSpan.Links; limit == 0 {
- span.DroppedLinks = uint32(len(links))
+ n := len(links)
+ if n > 0 {
+ bounded := max(min(n, intToUint32Bound), 0)
+ span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
+ }
} else {
if limit > 0 {
n := max(len(links)-limit, 0)
- span.DroppedLinks = uint32(n)
+ bounded := min(n, intToUint32Bound)
+ span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
links = links[n:]
}
span.Links = convLinks(links)
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index 2b53a25e..a6d0cbcc 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -8,3 +8,4 @@ nam
valu
thirdparty
addOpt
+observ
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index b01762ff..d4872287 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -16,6 +16,7 @@ linters:
- govet
- ineffassign
- misspell
+ - modernize
- perfsprint
- revive
- staticcheck
@@ -111,6 +112,9 @@ linters:
locale: US
ignore-rules:
- cancelled
+ modernize:
+ disable:
+ - omitzero
perfsprint:
int-conversion: true
err-error: true
@@ -197,6 +201,9 @@ linters:
- float-compare
- go-require
- require-error
+ usetesting:
+ context-background: true
+ context-todo: true
exclusions:
generated: lax
presets:
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
index 53285058..994b677d 100644
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -1,4 +1,5 @@
http://localhost
+https://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
@@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects
https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
-http://4.3.2.1:78/user/123
\ No newline at end of file
+http://4.3.2.1:78/user/123
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317
+# URL works, but it has blocked link checkers.
+https://dl.acm.org/doi/10.1145/198429.198435
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index f3abcfdc..e725282b 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -11,6 +11,111 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
+## [1.40.0/0.62.0/0.16.0] 2026-02-02
+
+### Added
+
+- Add `AlwaysRecord` sampler in `go.opentelemetry.io/otel/sdk/trace`. (#7724)
+- Add `Enabled` method to all synchronous instrument interfaces (`Float64Counter`, `Float64UpDownCounter`, `Float64Histogram`, `Float64Gauge`, `Int64Counter`, `Int64UpDownCounter`, `Int64Histogram`, `Int64Gauge`) in `go.opentelemetry.io/otel/metric`.
+ This stabilizes the synchronous instrument enabled feature, allowing users to check if an instrument will process measurements before performing computationally expensive operations. (#7763)
+- Add `go.opentelemetry.io/otel/semconv/v1.39.0` package.
+ The package contains semantic conventions from the `v1.39.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.39.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.38.0`. (#7783, #7789)
+
+### Changed
+
+- Improve the concurrent performance of `HistogramReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar` by 4x. (#7443)
+- Improve the concurrent performance of `FixedSizeReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar`. (#7447)
+- Improve performance of concurrent histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7474)
+- Improve performance of concurrent synchronous gauge measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7478)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`. (#7492)
+- `Exporter` in `go.opentelemetry.io/otel/exporters/prometheus` ignores metrics with the scope `go.opentelemetry.io/contrib/bridges/prometheus`.
+ This prevents scrape failures when the Prometheus exporter is misconfigured to get data from the Prometheus bridge. (#7688)
+- Improve performance of concurrent exponential histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7702)
+- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
+- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
+
+### Fixed
+
+- Fix bad log message when key-value pairs are dropped because of key duplication in `go.opentelemetry.io/otel/sdk/log`. (#7662)
+- Fix `DroppedAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not count the non-attribute key-value pairs dropped because of key duplication. (#7662)
+- Fix `SetAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not log that attributes are dropped when they are actually not dropped. (#7662)
+- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to correctly handle HTTP/2 `GOAWAY` frame. (#7794)
+- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `ioreg` command on Darwin (macOS). (#7818)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/exporters/zipkin`.
+ For more information, see the [OTel blog post deprecating the Zipkin exporter](https://opentelemetry.io/blog/2025/deprecating-zipkin-exporters/). (#7670)
+
+## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05
+
+### Added
+
+- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175)
+- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages.
+ This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287)
+- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`.
+ Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353)
+- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434)
+- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486)
+- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512)
+- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524)
+- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571)
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608)
+- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`.
+ All `Processor` implementations now include an `Enabled` method. (#7639)
+- The `go.opentelemetry.io/otel/semconv/v1.38.0` package.
+ The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7648)
+
+### Changed
+
+- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set.
+ Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266)
+- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302)
+- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306)
+- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`.
+ ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded.
+ Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default.
+ To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363)
+- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371)
+- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421)
+- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427)
+- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438)
+- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types.
+ If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442)
+
+### Fixed
+
+- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them.
+ Attributes with duplicate keys will use the last value passed. (#7300)
+- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372)
+- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656)
+
+### Removed
+
+- Drop support for [Go 1.23]. (#7274)
+- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`.
+ The `Enabled` method has been added to the `Processor` interface instead.
+ All `Processor` implementations must now implement the `Enabled` method.
+ Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639)
+
## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
This release is the last to support [Go 1.23].
@@ -3430,8 +3535,11 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.40.0...HEAD
+[1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0
+[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0
[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
+[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 0b3ae855..38dede93 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel
(This may print some warning about "build constraints exclude all Go
files", just ignore it.)
-This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
-can alternatively use `git` directly with:
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`.
+Alternatively, you can use `git` directly with:
```sh
git clone https://github.com/open-telemetry/opentelemetry-go
@@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go
that name is a kind of a redirector to GitHub that `go get` can
understand, but `git` does not.)
-This would put the project in the `opentelemetry-go` directory in
-current working directory.
+This will clone the project into an `opentelemetry-go` directory within the current working directory.
Enter the newly created directory and add your fork as a new remote:
@@ -109,7 +108,7 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the
maintainer merging.
- * At least one of the qualified approvals need to be from an
+ * At least one of the qualified approvals needs to be from an
[Approver]/[Maintainer] affiliated with a different company than the author
of the PR.
* PRs introducing changes that have already been discussed and consensus
@@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance
OpenTelemetry is an evolving specification, one where the desires and
-use cases are clear, but the method to satisfy those uses cases are
+use cases are clear, but the methods to satisfy those use cases are
not.
As such, Contributions should provide functionality and behavior that
-conforms to the specification, but the interface and structure is
+conforms to the specification, but the interface and structure are
flexible.
It is preferable to have contributions follow the idioms of the
@@ -217,7 +216,7 @@ about dependency compatibility.
This project does not partition dependencies based on the environment (i.e.
`development`, `staging`, `production`).
-Only the dependencies explicitly included in the released modules have be
+Only the dependencies explicitly included in the released modules have been
tested and verified to work with the released code. No other guarantee is made
about the compatibility of other dependencies.
@@ -635,8 +634,8 @@ is not in their root name.
The use of internal packages should be scoped to a single module. A sub-module
should never import from a parent internal package. This creates a coupling
-between the two modules where a user can upgrade the parent without the child
-and if the internal package API has changed it will fail to upgrade[^3].
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, it will fail to upgrade[^3].
There are two known exceptions to this rule:
@@ -657,7 +656,7 @@ this.
### Ignoring context cancellation
-OpenTelemetry API implementations need to ignore the cancellation of the context that are
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work.
@@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat
should be honored. This means all work done on behalf of the user provided context
should be canceled.
+### Observability
+
+OpenTelemetry Go SDK components should be instrumented to give users observability into the health and performance of the telemetry pipeline itself.
+This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications.
+
+This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components.
+
+#### Environment Variable Activation
+
+Observability features are currently experimental.
+They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable.
+This follows the established experimental feature pattern used throughout the SDK.
+
+Components should check for this environment variable using a consistent pattern:
+
+```go
+import "go.opentelemetry.io/otel/*/internal/x"
+
+if x.Observability.Enabled() {
+ // Initialize observability metrics
+}
+```
+
+**References**:
+
+- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go)
+- [sdk](./sdk/internal/x/x.go)
+
+#### Encapsulation
+
+Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`).
+It should not be mixed into the instrumented component.
+
+Prefer this:
+
+```go
+type SDKComponent struct {
+ inst *instrumentation
+}
+
+type instrumentation struct {
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+```
+
+To this:
+
+```go
+// ❌ Avoid this pattern.
+type SDKComponent struct {
+ /* other SDKComponent fields... */
+
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+```
+
+The instrumentation code should not bloat the code being instrumented.
+In practice, this usually means giving it its own file, or its own package if it is complex or reused.
+
+#### Initialization
+
+Instrumentation setup should be explicit, side-effect free, and local to the relevant component.
+Avoid relying on global or implicit [side effects][side-effect] for initialization.
+
+Encapsulate setup in constructor functions, ensuring clear ownership and scope:
+
+```go
+import (
+ "errors"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
+ "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
+)
+
+type SDKComponent struct {
+ inst *instrumentation
+}
+
+func NewSDKComponent(config Config) (*SDKComponent, error) {
+ inst, err := newInstrumentation()
+ if err != nil {
+ return nil, err
+ }
+ return &SDKComponent{inst: inst}, nil
+}
+
+type instrumentation struct {
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+
+func newInstrumentation() (*instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ meter := otel.GetMeterProvider().Meter(
+ "",
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ inst := &instrumentation{}
+
+ var err, e error
+ inst.inflight, e = otelconv.NewSDKComponentInflight(meter)
+ err = errors.Join(err, e)
+
+ inst.exported, e = otelconv.NewSDKComponentExported(meter)
+ err = errors.Join(err, e)
+
+ return inst, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (c *Component) initObservability() {
+ // Initialize observability metrics
+ if !x.Observability.Enabled() {
+ return
+ }
+
+ // Initialize observability metrics
+ c.inst = &instrumentation{/* ... */}
+}
+```
+
+[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science)
+
+#### Performance
+
+When observability is disabled, there should be little to no overhead.
+
+```go
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ if e.inst != nil {
+ attrs := expensiveOperation()
+ e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
+ }
+ // Export spans...
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ attrs := expensiveOperation()
+ e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
+ // Export spans...
+}
+
+func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) {
+ if i == nil || i.inflight == nil {
+ return
+ }
+ i.inflight.Add(ctx, count, metric.WithAttributes(attrs...))
+}
+```
+
+When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead.
+
+##### Attribute and Option Allocation Management
+
+Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes.
+
+```go
+var (
+ attrPool = sync.Pool{
+ New: func() any {
+ // Pre-allocate common capacity
+ knownCap := 8 // Adjust based on expected usage
+ s := make([]attribute.KeyValue, 0, knownCap)
+ // Return a pointer to avoid extra allocation on Put().
+ return &s
+ },
+ }
+
+ addOptPool = &sync.Pool{
+ New: func() any {
+ const n = 1 // WithAttributeSet
+ o := make([]metric.AddOption, 0, n)
+ // Return a pointer to avoid extra allocation on Put().
+ return &o
+ },
+ }
+)
+
+func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) {
+ attrs := attrPool.Get().(*[]attribute.KeyValue)
+ defer func() {
+ *attrs = (*attrs)[:0] // Reset.
+ attrPool.Put(attrs)
+ }()
+
+ *attrs = append(*attrs, baseAttrs...)
+ // Add any dynamic attributes.
+ *attrs = append(*attrs, semconv.OTelComponentName("exporter-1"))
+
+ addOpt := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *addOpt = (*addOpt)[:0]
+ addOptPool.Put(addOpt)
+ }()
+
+ set := attribute.NewSet(*attrs...)
+ *addOpt = append(*addOpt, metric.WithAttributeSet(set))
+
+ i.counter.Add(ctx, value, *addOpt...)
+}
+```
+
+Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used.
+This amortizes the cost of allocation and synchronization.
+Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness.
+
+[`sync.Pool`]: https://pkg.go.dev/sync#Pool
+
+##### Cache common attribute sets for repeated measurements
+
+If a static set of attributes is used for measurements and it is known at compile time, pre-compute and cache these attributes.
+
+```go
+type spanLiveSetKey struct {
+ sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+ {true}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordAndSample,
+ ),
+ ),
+ {false}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordOnly,
+ ),
+ ),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+ key := spanLiveSetKey{sampled: sampled}
+ return spanLiveSetCache[key]
+}
+```
+
+##### Benchmarking
+
+Always provide benchmarks when introducing or refactoring instrumentation.
+Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios:
+
+```go
+func BenchmarkExportSpans(b *testing.B) {
+ scenarios := []struct {
+ name string
+ obsEnabled bool
+ }{
+ {"ObsDisabled", false},
+ {"ObsEnabled", true},
+ }
+
+ for _, scenario := range scenarios {
+ b.Run(scenario.name, func(b *testing.B) {
+ b.Setenv(
+ "OTEL_GO_X_OBSERVABILITY",
+ strconv.FormatBool(scenario.obsEnabled),
+ )
+
+ exporter := NewExporter()
+ spans := generateTestSpans(100)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ _ = exporter.ExportSpans(context.Background(), spans)
+ }
+ })
+ }
+}
+```
+
+#### Error Handling and Robustness
+
+Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible.
+
+```go
+func newInstrumentation() (*instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ m := otel.GetMeterProvider().Meter(/* initialize meter */)
+ counter, err := otelconv.NewSDKComponentCounter(m)
+ // Use the partially initialized counter if available.
+ i := &instrumentation{counter: counter}
+ // Return any error to the caller.
+ return i, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func newInstrumentation() *instrumentation {
+ if !x.Observability.Enabled() {
+		return nil
+ }
+
+ m := otel.GetMeterProvider().Meter(/* initialize meter */)
+ counter, err := otelconv.NewSDKComponentCounter(m)
+ if err != nil {
+ // ❌ Do not dump the error to the OTel Handler. Return it to the
+ // caller.
+ otel.Handle(err)
+ // ❌ Do not return nil if we can still use the partially initialized
+ // counter.
+ return nil
+ }
+ return &instrumentation{counter: counter}
+}
+```
+
+If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`.
+
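+For example, a minimal sketch (reusing the illustrative `SDKComponent` and `newInstrumentation` names from the examples above) of a constructor that has no error return value and therefore falls back to the global handler:
+
+```go
+// NewSDKComponent has no error return in this sketch, so instrumentation
+// setup failures cannot be surfaced to the caller.
+func NewSDKComponent() *SDKComponent {
+	inst, err := newInstrumentation()
+	if err != nil {
+		// Report the setup error to the globally registered handler
+		// instead of silently dropping it.
+		otel.Handle(err)
+	}
+	// A partially initialized instrumentation is still usable.
+	return &SDKComponent{inst: inst}
+}
+```
+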
+#### Context Propagation
+
+Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context:
+
+```go
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ // Use the provided context for observability measurements
+ e.inst.recordSpanExportStarted(ctx, len(spans))
+
+ err := e.doExport(ctx, spans)
+
+ if err != nil {
+ e.inst.recordSpanExportFailed(ctx, len(spans), err)
+ } else {
+ e.inst.recordSpanExportSucceeded(ctx, len(spans))
+ }
+
+ return err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ // ❌ Do not break the context propagation.
+ e.inst.recordSpanExportStarted(context.Background(), len(spans))
+
+ err := e.doExport(ctx, spans)
+
+ /* ... */
+
+ return err
+}
+```
+
+#### Semantic Conventions Compliance
+
+All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md).
+
+Use the metric semantic conventions convenience package [otelconv](./semconv/v1.39.0/otelconv/metric.go).
+
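+For example, a condensed sketch (reusing the illustrative `SDKComponentInflight` instrument from the examples above; the scope name is hypothetical) of letting `otelconv` supply the conventional instrument and attribute names:
+
+```go
+func recordInflight(ctx context.Context) error {
+	meter := otel.GetMeterProvider().Meter(
+		"go.opentelemetry.io/otel/sdk/example", // Hypothetical scope name.
+		metric.WithSchemaURL(semconv.SchemaURL),
+	)
+
+	// The otelconv constructor applies the conventional name, unit, and
+	// description for the instrument.
+	inflight, err := otelconv.NewSDKComponentInflight(meter)
+	if err != nil {
+		return err
+	}
+
+	inflight.Add(ctx, 1, metric.WithAttributes(
+		semconv.OTelComponentName("exporter-0"),
+	))
+	return nil
+}
+```
+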
+##### Component Identification
+
+Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes).
+
+If a component is not a well-known type specified in the semantic conventions, use the package-path-qualified type name as a stable identifier.
+
+```go
+componentType := "go.opentelemetry.io/otel/sdk/trace.Span"
+```
+
+```go
+// ❌ Do not do this.
+componentType := "trace-span"
+```
+
+The component name should be a stable unique identifier for the specific instance of the component.
+
+Use a global counter to ensure uniqueness if necessary.
+
+```go
+// Unique 0-based ID counter for component instances.
+var componentIDCounter atomic.Int64
+
+// nextID returns the next unique ID for a component.
+func nextID() int64 {
+ return componentIDCounter.Add(1) - 1
+}
+
+// componentName returns a unique name for the component instance.
+func componentName() attribute.KeyValue {
+ id := nextID()
+ name := fmt.Sprintf("%s/%d", componentType, id)
+ return semconv.OTelComponentName(name)
+}
+```
+
+The component ID will need to be resettable for deterministic testing.
+If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter.
+See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference.
+
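+A rough sketch of such a package (the `counter` package below is hypothetical and only mirrors the pattern described above):
+
+```go
+// Package counter provides process-wide component instance IDs.
+package counter
+
+import "sync/atomic"
+
+var componentID atomic.Int64
+
+// NextID returns the next unique, 0-based component instance ID.
+func NextID() int64 {
+	return componentID.Add(1) - 1
+}
+
+// Reset zeroes the counter. It is intended only for deterministic tests.
+func Reset() {
+	componentID.Store(0)
+}
+```
+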
+#### Testing
+
+Use deterministic testing with isolated state:
+
+```go
+func TestObservability(t *testing.T) {
+ // Restore state after test to ensure this does not affect other tests.
+ prev := otel.GetMeterProvider()
+ t.Cleanup(func() { otel.SetMeterProvider(prev) })
+
+ // Isolate the meter provider for deterministic testing
+ reader := metric.NewManualReader()
+ meterProvider := metric.NewMeterProvider(metric.WithReader(reader))
+ otel.SetMeterProvider(meterProvider)
+
+ // Use t.Setenv to ensure environment variable is restored after test.
+ t.Setenv("OTEL_GO_X_OBSERVABILITY", "true")
+
+ // Reset component ID counter to ensure deterministic component names.
+ componentIDCounter.Store(0)
+
+ /* ... test code ... */
+}
+```
+
+Test order should not affect results.
+Ensure that any global state (e.g. component ID counters) is reset between tests.
+
## Approvers and Maintainers
### Maintainers
@@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt
### Triagers
- [Alex Kats](https://github.com/akats7), Capital One
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
@@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http
- [Aaron Clawson](https://github.com/MadVikingGod)
- [Anthony Mirabella](https://github.com/Aneurysm9)
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes)
- [Chester Cheung](https://github.com/hanyuancheung)
- [Evan Torrie](https://github.com/evantorrie)
- [Gustavo Silva Paiva](https://github.com/paivagustavo)
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index bc0f1f92..44870248 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -146,11 +146,12 @@ build-tests/%:
# Tests
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz
.PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
+test-fuzz: ARGS=-fuzztime=10s -fuzz
test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 6b7ab5f2..c6335954 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -55,25 +55,18 @@ Currently, this project supports the following environments.
|----------|------------|--------------|
| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
-| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
-| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
-| Ubuntu | 1.23 | arm64 |
-| macOS 13 | 1.25 | amd64 |
-| macOS 13 | 1.24 | amd64 |
-| macOS 13 | 1.23 | amd64 |
+| macOS | 1.25 | amd64 |
+| macOS | 1.24 | amd64 |
| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
-| macOS | 1.23 | arm64 |
| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
-| Windows | 1.23 | amd64 |
| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
-| Windows | 1.23 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 1ddcdef0..861756fd 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit
## Breaking changes validation
-You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API.
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
@@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
```
3. Update the [Changelog](./CHANGELOG.md).
- - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+ - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand.
To verify this, you can look directly at the commits since the ``.
```
@@ -107,34 +107,50 @@ It is critical you make sure the version you push upstream is correct.
...
```
-## Release
+## Sign artifacts
-Finally create a Release for the new `` on GitHub.
-The release body should include all the release notes from the Changelog for this release.
+To ensure we comply with CNCF best practices, we need to sign the release artifacts.
-### Sign the Release Artifact
+Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag.
+Both archives need to be signed with your GPG key.
-To ensure we comply with CNCF best practices, we need to sign the release artifact.
-The tarball attached to the GitHub release needs to be signed with your GPG key.
+You can use [this script] to verify the contents of the archives before signing them.
-Follow [these steps] to sign the release artifact and upload it to GitHub.
-You can use [this script] to verify the contents of the tarball before signing it.
+To find your GPG key ID, run:
-Be sure to use the correct GPG key when signing the release artifact.
+```terminal
+gpg --list-secret-keys --keyid-format=long
+```
+
+The key ID is the 16-character string after `sec rsa4096/` (or similar).
+
+Set environment variables and sign both artifacts:
```terminal
-gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz
+export VERSION="" # e.g., v1.32.0
+export KEY_ID=""
+
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip
```
-You can verify the signature with:
+You can verify the signatures with:
```terminal
-gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz
+gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz
+gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip
```
-[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
[this script]: https://github.com/MrAlias/attest-sh
+## Release
+
+Finally create a Release for the new `` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+***IMPORTANT***: GitHub Releases are immutable once created.
+You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later.
+
## Post-Release
### Contrib Repository
@@ -160,14 +176,6 @@ This helps track what changes were included in each release.
Once all related issues and PRs have been added to the milestone, close the milestone.
-### Demo Repository
-
-Bump the dependencies in the following Go services:
-
-- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
-
### Close the `Version Release` issue
Once the todo list in the `Version Release` issue is complete, close the issue.
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
index b8cb605c..b27c9e84 100644
--- a/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -83,7 +83,7 @@ is designed so the following goals can be achieved.
in either the module path or the import path.
* In addition to public APIs, telemetry produced by stable instrumentation
will remain stable and backwards compatible. This is to avoid breaking
- alerts and dashboard.
+ alerts and dashboards.
* Modules will be used to encapsulate instrumentation, detectors, exporters,
propagators, and any other independent sets of related components.
* Experimental modules still under active development will be versioned at
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index 6333d34b..6cc1a165 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -16,7 +16,7 @@ type (
// set into a wire representation.
Encoder interface {
// Encode returns the serialized encoding of the attribute set using
- // its Iterator. This result may be cached by a attribute.Set.
+ // its Iterator. This result may be cached by an attribute.Set.
Encode(iterator Iterator) string
// ID returns a value that is unique for each class of attribute
diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go
new file mode 100644
index 00000000..6aa69aea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute/internal/xxhash"
+)
+
+// Type identifiers. These identifiers are hashed before the value of the
+// corresponding type. This is done to distinguish values that are hashed with
+// the same value representation (e.g. `int64(1)` and `true`, `[]int64{0}` and
+// `int64(0)`).
+//
+// These are all 8 byte length strings converted to a uint64 representation. A
+// uint64 is used instead of the string directly as an optimization, it avoids
+// the for loop in [xxhash] which adds minor overhead.
+const (
+ boolID uint64 = 7953749933313450591 // "_boolean" (little endian)
+ int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian)
+ float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian)
+ stringID uint64 = 6874584755375207263 // "_string_" (little endian)
+ boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian)
+ int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian)
+ float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian)
+ stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian)
+)
+
+// hashKVs returns a new xxHash64 hash of kvs.
+func hashKVs(kvs []KeyValue) uint64 {
+ h := xxhash.New()
+ for _, kv := range kvs {
+ h = hashKV(h, kv)
+ }
+ return h.Sum64()
+}
+
+// hashKV returns the xxHash64 hash of kv with h as the base.
+func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash {
+ h = h.String(string(kv.Key))
+
+ switch kv.Value.Type() {
+ case BOOL:
+ h = h.Uint64(boolID)
+ h = h.Uint64(kv.Value.numeric)
+ case INT64:
+ h = h.Uint64(int64ID)
+ h = h.Uint64(kv.Value.numeric)
+ case FLOAT64:
+ h = h.Uint64(float64ID)
+ // Assumes numeric stored with math.Float64bits.
+ h = h.Uint64(kv.Value.numeric)
+ case STRING:
+ h = h.Uint64(stringID)
+ h = h.String(kv.Value.stringly)
+ case BOOLSLICE:
+ h = h.Uint64(boolSliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Bool(rv.Index(i).Bool())
+ }
+ case INT64SLICE:
+ h = h.Uint64(int64SliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Int64(rv.Index(i).Int())
+ }
+ case FLOAT64SLICE:
+ h = h.Uint64(float64SliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Float64(rv.Index(i).Float())
+ }
+ case STRINGSLICE:
+ h = h.Uint64(stringSliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.String(rv.Index(i).String())
+ }
+ case INVALID:
+ default:
+ // Logging is an alternative, but using the internal logger here
+ // causes an import cycle so it is not done.
+ v := kv.Value.AsInterface()
+ msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v)
+ panic(msg)
+ }
+ return h
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
index 08755043..7f5eae87 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
@@ -13,32 +13,28 @@ import (
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) any {
- var zero bool
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) any {
- var zero int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) any {
- var zero float64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) any {
- var zero string
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go
new file mode 100644
index 00000000..113a9783
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package xxhash provides a wrapper around the xxhash library for attribute hashing.
+package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash"
+
+import (
+ "encoding/binary"
+ "math"
+
+ "github.com/cespare/xxhash/v2"
+)
+
+// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values.
+type Hash struct {
+ d *xxhash.Digest
+}
+
+// New returns a new initialized xxHash64 hasher.
+func New() Hash {
+ return Hash{d: xxhash.New()}
+}
+
+func (h Hash) Uint64(val uint64) Hash {
+ var buf [8]byte
+ binary.LittleEndian.PutUint64(buf[:], val)
+	// Errors from Write are always nil for xxhash; if one is ever
+	// returned, panic.
+ _, err := h.d.Write(buf[:])
+ if err != nil {
+ panic("xxhash write of uint64 failed: " + err.Error())
+ }
+ return h
+}
+
+func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function.
+ if val {
+ return h.Uint64(1)
+ }
+ return h.Uint64(0)
+}
+
+func (h Hash) Float64(val float64) Hash {
+ return h.Uint64(math.Float64bits(val))
+}
+
+func (h Hash) Int64(val int64) Hash {
+ return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing.
+}
+
+func (h Hash) String(val string) Hash {
+	// Errors from WriteString are always nil for xxhash; if one is ever
+	// returned, panic.
+ _, err := h.d.WriteString(val)
+ if err != nil {
+ panic("xxhash write of string failed: " + err.Error())
+ }
+ return h
+}
+
+// Sum64 returns the current hash value.
+func (h Hash) Sum64() uint64 {
+ return h.d.Sum64()
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 64735d38..6572c98b 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -9,6 +9,8 @@ import (
"reflect"
"slices"
"sort"
+
+ "go.opentelemetry.io/otel/attribute/internal/xxhash"
)
type (
@@ -23,19 +25,19 @@ type (
// the Equals method to ensure stable equivalence checking.
//
// Users should also use the Distinct returned from Equivalent as a map key
- // instead of a Set directly. In addition to that type providing guarantees
- // on stable equivalence, it may also provide performance improvements.
+ // instead of a Set directly. Set has relatively poor performance when used
+ // as a map key compared to Distinct.
Set struct {
- equivalent Distinct
+ hash uint64
+ data any
}
- // Distinct is a unique identifier of a Set.
+ // Distinct is an identifier of a Set which is very likely to be unique.
//
- // Distinct is designed to ensure equivalence stability: comparisons will
- // return the same value across versions. For this reason, Distinct should
- // always be used as a map key instead of a Set.
+ // Distinct should be used as a map key instead of a Set to provide better
+ // performance for map operations.
Distinct struct {
- iface any
+ hash uint64
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -46,15 +48,34 @@ type (
Sortable []KeyValue
)
+// Compile time check these types remain comparable.
+var (
+ _ = isComparable(Set{})
+ _ = isComparable(Distinct{})
+)
+
+func isComparable[T comparable](t T) T { return t }
+
var (
// keyValueType is used in computeDistinctReflect.
- keyValueType = reflect.TypeOf(KeyValue{})
+ keyValueType = reflect.TypeFor[KeyValue]()
+
+ // emptyHash is the hash of an empty set.
+ emptyHash = xxhash.New().Sum64()
- // emptySet is returned for empty attribute sets.
- emptySet = &Set{
- equivalent: Distinct{
- iface: [0]KeyValue{},
- },
+ // userDefinedEmptySet is an empty set. It was mistakenly exposed to users
+ // as something they can assign to, so it must remain addressable and
+ // mutable.
+ //
+ // This is kept for backwards compatibility, but should not be used in new code.
+ userDefinedEmptySet = &Set{
+ hash: emptyHash,
+ data: [0]KeyValue{},
+ }
+
+ emptySet = Set{
+ hash: emptyHash,
+ data: [0]KeyValue{},
}
)
@@ -62,33 +83,35 @@ var (
//
// This is a convenience provided for optimized calling utility.
func EmptySet() *Set {
- return emptySet
-}
-
-// reflectValue abbreviates reflect.ValueOf(d).
-func (d Distinct) reflectValue() reflect.Value {
- return reflect.ValueOf(d.iface)
+ // Continue to return the pointer to the user-defined empty set for
+ // backwards-compatibility.
+ //
+ // New code should not use this, instead use emptySet.
+ return userDefinedEmptySet
}
// Valid reports whether this value refers to a valid Set.
-func (d Distinct) Valid() bool {
- return d.iface != nil
+func (d Distinct) Valid() bool { return d.hash != 0 }
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (l Set) reflectValue() reflect.Value {
+ return reflect.ValueOf(l.data)
}
// Len returns the number of attributes in this set.
func (l *Set) Len() int {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return 0
}
- return l.equivalent.reflectValue().Len()
+ return l.reflectValue().Len()
}
// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return KeyValue{}, false
}
- value := l.equivalent.reflectValue()
+ value := l.reflectValue()
if idx >= 0 && idx < value.Len() {
// Note: The Go compiler successfully avoids an allocation for
@@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
// Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return Value{}, false
}
- rValue := l.equivalent.reflectValue()
+ rValue := l.reflectValue()
vlen := rValue.Len()
idx := sort.Search(vlen, func(idx int) bool {
@@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue {
return iter.ToSlice()
}
-// Equivalent returns a value that may be used as a map key. The Distinct type
-// guarantees that the result will equal the equivalent. Distinct value of any
+// Equivalent returns a value that may be used as a map key. Equal Distinct
+// values very likely come from equivalent attribute Sets. The result equals the Distinct value of any
// attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct {
- if l == nil || !l.equivalent.Valid() {
- return emptySet.equivalent
+ if l == nil || l.hash == 0 {
+ return Distinct{hash: emptySet.hash}
}
- return l.equivalent
+ return Distinct{hash: l.hash}
}
// Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
- return l.Equivalent() == o.Equivalent()
+ if l.Equivalent() != o.Equivalent() {
+ return false
+ }
+ if l == nil || l.hash == 0 {
+ l = &emptySet
+ }
+ if o == nil || o.hash == 0 {
+ o = &emptySet
+ }
+ return l.data == o.data
}
// Encoded returns the encoded form of this set, according to encoder.
@@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string {
return encoder.Encode(l.Iter())
}
-func empty() Set {
- return Set{
- equivalent: emptySet.equivalent,
- }
-}
-
// NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
@@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set.
if len(kvs) == 0 {
- return empty(), nil
+ return emptySet, nil
}
// Stable sort so the following de-duplication can implement
@@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if filter != nil {
if div := filteredToFront(kvs, filter); div != 0 {
- return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ return newSet(kvs[div:]), kvs[:div]
}
}
- return Set{equivalent: computeDistinct(kvs)}, nil
+ return newSet(kvs), nil
}
// NewSetWithSortableFiltered returns a new Set.
@@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if first == 0 {
// It is safe to assume len(slice) >= 1 given we found at least one
// attribute above that needs to be filtered out.
- return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ return newSet(slice[1:]), slice[:1]
}
// Move the filtered slice[first] to the front (preserving order).
@@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
// Do not re-evaluate re(slice[first+1:]).
div := filteredToFront(slice[1:first+1], re) + 1
- return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
+ return newSet(slice[div:]), slice[:div]
}
-// computeDistinct returns a Distinct using either the fixed- or
-// reflect-oriented code path, depending on the size of the input. The input
-// slice is assumed to already be sorted and de-duplicated.
-func computeDistinct(kvs []KeyValue) Distinct {
- iface := computeDistinctFixed(kvs)
- if iface == nil {
- iface = computeDistinctReflect(kvs)
+// newSet returns a new set based on the sorted and uniqued kvs.
+func newSet(kvs []KeyValue) Set {
+ s := Set{
+ hash: hashKVs(kvs),
+ data: computeDataFixed(kvs),
}
- return Distinct{
- iface: iface,
+ if s.data == nil {
+ s.data = computeDataReflect(kvs)
}
+ return s
}
-// computeDistinctFixed computes a Distinct for small slices. It returns nil
-// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) any {
+// computeDataFixed computes the Set data for small slices. It returns nil if the
+// input is too large for this code path.
+func computeDataFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any {
}
}
-// computeDistinctReflect computes a Distinct using reflection, works for any
-// size input.
-func computeDistinctReflect(kvs []KeyValue) any {
+// computeDataReflect computes the Set data using reflection; it works for any size
+// input.
+func computeDataReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any {
// MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) {
- return json.Marshal(l.equivalent.iface)
+ return json.Marshal(l.data)
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
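
For callers, the contract of the reworked Set is unchanged: Equivalent() still returns a comparable Distinct suited for map keys, now backed by a 64-bit xxhash value. A minimal sketch against the public attribute API (key names and values are illustrative):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	counts := make(map[attribute.Distinct]int)

	a := attribute.NewSet(attribute.String("host", "a"), attribute.Int("shard", 1))
	b := attribute.NewSet(attribute.Int("shard", 1), attribute.String("host", "a"))

	counts[a.Equivalent()]++
	counts[b.Equivalent()]++ // Same elements, so this increments the same entry.

	fmt.Println(len(counts)) // 1
}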
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
index e584b247..24f1fa37 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
+ idx := int(i) - 0
+ if i < 0 || idx >= len(_Type_index)-1 {
return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
}
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
+ return _Type_name[_Type_index[idx]:_Type_index[idx+1]]
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index 653c33a8..5931e712 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -66,8 +66,7 @@ func IntValue(v int) Value {
// IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value {
- var int64Val int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]()))
for i, val := range v {
cp.Elem().Index(i).SetInt(int64(val))
}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index f83a448e..c4093e49 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -317,7 +317,7 @@ func parseMember(member string) (Member, error) {
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
- for _, pStr := range strings.Split(properties, propertyDelimiter) {
+ for pStr := range strings.SplitSeq(properties, propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return newInvalidMember(), err
@@ -480,7 +480,7 @@ func Parse(bStr string) (Baggage, error) {
}
b := make(baggage.List)
- for _, memberStr := range strings.Split(bStr, listDelimiter) {
+ for memberStr := range strings.SplitSeq(bStr, listDelimiter) {
m, err := parseMember(memberStr)
if err != nil {
return Baggage{}, err
@@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// If we couldn't find any valid key character,
// it means the key is either empty or invalid.
if keyStart == keyEnd {
- return
+ return p, ok
}
// Skip spaces after the key: " key< >= value ".
@@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// A key can have no value, like: " key ".
ok = true
p.key = s[keyStart:keyEnd]
- return
+ return p, ok
}
// If we have not reached the end and we can't find the '=' delimiter,
// it means the property is invalid.
if s[index] != keyValueDelimiter[0] {
- return
+ return p, ok
}
// Attempting to parse the value.
@@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// we have not reached the end, it means the property is
// invalid, something like: " key = value value1".
if index != len(s) {
- return
+ return p, ok
}
// Decode a percent-encoded value.
rawVal := s[valueStart:valueEnd]
unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
- return
+ return p, ok
}
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
@@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
p.hasValue = true
p.value = value
- return
+ return p, ok
}
func skipSpace(s string, offset int) int {
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index a311fbb4..676e7911 100644
--- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
-FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver
+FROM otel/weaver:v0.20.0@sha256:fa4f1c6954ecea78ab1a4e865bd6f5b4aaba80c1896f9f4a11e2c361d04e197e AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
index 492480f8..756bf796 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -16,7 +16,6 @@ import (
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
- "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
@@ -101,7 +100,7 @@ func (c *client) Shutdown(ctx context.Context) error {
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
-func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
+func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) (uploadErr error) {
// The otlpmetric.Exporter synchronizes access to client methods, and
// ensures this is not called after the Exporter is shutdown. Only thing
// to do here is send data.
@@ -116,7 +115,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
ctx, cancel := c.exportContext(ctx)
defer cancel()
- return c.requestFunc(ctx, func(iCtx context.Context) error {
+ return errors.Join(uploadErr, c.requestFunc(ctx, func(iCtx context.Context) error {
resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
})
@@ -124,8 +123,8 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
msg := resp.PartialSuccess.GetErrorMessage()
n := resp.PartialSuccess.GetRejectedDataPoints()
if n != 0 || msg != "" {
- err := internal.MetricPartialSuccessError(n, msg)
- otel.Handle(err)
+ e := internal.MetricPartialSuccessError(n, msg)
+ uploadErr = errors.Join(uploadErr, e)
}
}
// nil is converted to OK.
@@ -134,7 +133,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
return nil
}
return err
- })
+ }))
}
// exportContext returns a copy of parent with an appropriate deadline and
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
index b29cd11a..f6e4f0d0 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
@@ -20,7 +20,7 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/o
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
-//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
index b54a173b..7f947273 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
@@ -18,7 +18,6 @@ import (
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/metric"
- "go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// DefaultEnvOptionsReader is the default environments reader.
@@ -165,11 +164,11 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector))
if s, ok := e.GetEnvValue(n); ok {
switch strings.ToLower(s) {
case "cumulative":
- fn(cumulativeTemporality)
+ fn(metric.CumulativeTemporalitySelector)
case "delta":
- fn(deltaTemporality)
+ fn(metric.DeltaTemporalitySelector)
case "lowmemory":
- fn(lowMemory)
+ fn(metric.LowMemoryTemporalitySelector)
default:
global.Warn(
"OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.",
@@ -181,28 +180,6 @@ func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector))
}
}
-func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
- return metricdata.CumulativeTemporality
-}
-
-func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
- switch ik {
- case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
- return metricdata.DeltaTemporality
- default:
- return metricdata.CumulativeTemporality
- }
-}
-
-func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
- switch ik {
- case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
- return metricdata.DeltaTemporality
- default:
- return metricdata.CumulativeTemporality
- }
-}
-
func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
return func(e *envconfig.EnvOptionsReader) {
if s, ok := e.GetEnvValue(n); ok {
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
index 6af5591e..243abcb1 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
@@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string {
return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
}
+// As returns true if ps can be assigned to target and makes the assignment.
+// Otherwise, it returns false. This supports the errors.As() interface.
+func (ps PartialSuccess) As(target any) bool {
+ t, ok := target.(*PartialSuccess)
+ if !ok {
+ return false
+ }
+ *t = ps
+ return true
+}
+
// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
_, ok := err.(PartialSuccess)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
index 80691ac3..c14e5ae7 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
@@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
+ // Check if context is canceled before attempting to wait and retry.
+ if ctx.Err() != nil {
+ return fmt.Errorf("%w: %w", ctx.Err(), err)
+ }
+
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
index 7909cac5..e4ec772a 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
@@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme
// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
func Version() string {
- return "1.38.0"
+ return "1.40.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
index 379bc817..d431fc45 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -113,7 +113,7 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
if psid := sd.Parent().SpanID(); psid.IsValid() {
s.ParentSpanId = psid[:]
}
- s.Flags = buildSpanFlags(sd.Parent())
+ s.Flags = buildSpanFlagsWith(sd.SpanContext().TraceFlags(), sd.Parent())
return s
}
@@ -159,7 +159,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID()
- flags := buildSpanFlags(otLink.SpanContext)
+ flags := buildSpanFlagsWith(otLink.SpanContext.TraceFlags(), otLink.SpanContext)
sl = append(sl, &tracepb.Span_Link{
TraceId: tid[:],
@@ -172,13 +172,15 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
return sl
}
-func buildSpanFlags(sc trace.SpanContext) uint32 {
- flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
- if sc.IsRemote() {
- flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+func buildSpanFlagsWith(tf trace.TraceFlags, parent trace.SpanContext) uint32 {
+ // Lower 8 bits are the W3C TraceFlags; always indicate that we know whether the parent is remote
+ flags := uint32(tf) | uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK)
+ // Set the parent-is-remote bit when applicable
+ if parent.IsRemote() {
+ flags |= uint32(tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK)
}
- return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative
+ return flags // nolint:gosec // Flags is a bitmask and can't be negative
}
// spanEvents transforms span Events to an OTLP span events.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
index 4b4cc76f..76b7cd46 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -17,9 +17,10 @@ import (
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
- "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
)
@@ -45,6 +46,9 @@ type client struct {
conn *grpc.ClientConn
tscMu sync.RWMutex
tsc coltracepb.TraceServiceClient
+
+ instID int64
+ inst *observ.Instrumentation
}
// Compile time check *client implements otlptrace.Client.
@@ -68,6 +72,7 @@ func newClient(opts ...Option) *client {
stopCtx: ctx,
stopFunc: cancel,
conn: cfg.GRPCConn,
+ instID: counter.NextExporterID(),
}
if len(cfg.Traces.Headers) > 0 {
@@ -92,13 +97,24 @@ func (c *client) Start(context.Context) error {
c.conn = conn
}
+ // Initialize the instrumentation if not already done.
+ //
+ // Initialize here instead of NewClient to allow any errors to be passed
+ // back to the caller and so that the environment variable enabling
+ // instrumentation can still be set programmatically before Start.
+ var err error
+ if c.inst == nil {
+ target := c.conn.CanonicalTarget()
+ c.inst, err = observ.NewInstrumentation(c.instID, target)
+ }
+
// The otlptrace.Client interface states this method is called just once,
// so no need to check if already started.
c.tscMu.Lock()
c.tsc = coltracepb.NewTraceServiceClient(c.conn)
c.tscMu.Unlock()
- return nil
+ return err
}
var errAlreadyStopped = errors.New("the client is already stopped")
@@ -174,7 +190,7 @@ var errShutdown = errors.New("the client is shutdown")
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
-func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) (uploadErr error) {
// Hold a read lock to ensure a shut down initiated after this starts does
// not abandon the export. This read lock acquire has less priority than a
// write lock acquire (i.e. Stop), meaning if the client is shutting down
@@ -189,6 +205,12 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
ctx, cancel := c.exportContext(ctx)
defer cancel()
+ var code codes.Code
+ if c.inst != nil {
+ op := c.inst.ExportSpans(ctx, len(protoSpans))
+ defer func() { op.End(uploadErr, code) }()
+ }
+
return c.requestFunc(ctx, func(iCtx context.Context) error {
resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
ResourceSpans: protoSpans,
@@ -197,16 +219,17 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
msg := resp.PartialSuccess.GetErrorMessage()
n := resp.PartialSuccess.GetRejectedSpans()
if n != 0 || msg != "" {
- err := internal.TracePartialSuccessError(n, msg)
- otel.Handle(err)
+ e := internal.TracePartialSuccessError(n, msg)
+ uploadErr = errors.Join(uploadErr, e)
}
}
// nil is converted to OK.
- if status.Code(err) == codes.OK {
+ code = status.Code(err)
+ if code == codes.OK {
// Success.
- return nil
+ return uploadErr
}
- return err
+ return errors.Join(uploadErr, err)
})
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go
new file mode 100644
index 00000000..323b2a2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter/counter.go
@@ -0,0 +1,31 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/counter/counter.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package counter provides a simple counter for generating unique IDs.
+//
+// This package is used to generate unique IDs while allowing testing packages
+// to reset the counter.
+package counter // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter"
+
+import "sync/atomic"
+
+// exporterN is a global 0-based count of the number of exporters created.
+var exporterN atomic.Int64
+
+// NextExporterID returns the next unique ID for an exporter.
+func NextExporterID() int64 {
+ const inc = 1
+ return exporterN.Add(inc) - inc
+}
+
+// SetExporterID sets the exporter ID counter to v and returns the previous
+// value.
+//
+// This function is useful for testing purposes, allowing you to reset the
+// counter. It should not be used in production code.
+func SetExporterID(v int64) int64 {
+ return exporterN.Swap(v)
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
index b6e6b10f..7fe9c9f3 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -23,3 +23,12 @@ package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ot
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/x/x.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\" }" --out=x/x.go
+//go:generate gotmpl --body=../../../../../internal/shared/x/x_test.go.tmpl "--data={}" --out=x/x_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target.go.tmpl "--data={ \"pkg\": \"observ\", \"pkg_path\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ\" }" --out=observ/target.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/observ/target_test.go.tmpl "--data={ \"pkg\": \"observ\" }" --out=observ/target_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter\" }" --out=counter/counter.go
+//go:generate gotmpl --body=../../../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go
new file mode 100644
index 00000000..0dd54e4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/doc.go
@@ -0,0 +1,6 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package observ provides experimental observability instrumentation for the
+// otlptracegrpc exporter.
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go
new file mode 100644
index 00000000..d4a69f4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/instrumentation.go
@@ -0,0 +1,342 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/codes"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
+ "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
+)
+
+const (
+ // ScopeName is the unique name of the meter used for instrumentation.
+ ScopeName = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
+
+ // SchemaURL is the schema URL of the metrics produced by this
+ // instrumentation.
+ SchemaURL = semconv.SchemaURL
+
+ // Version is the current version of this instrumentation.
+ //
+ // This matches the version of the exporter.
+ Version = internal.Version
+)
+
+var (
+ measureAttrsPool = &sync.Pool{
+ New: func() any {
+ const n = 1 + // component.name
+ 1 + // component.type
+ 1 + // server.addr
+ 1 + // server.port
+ 1 + // error.type
+ 1 // rpc.grpc.status_code
+ s := make([]attribute.KeyValue, 0, n)
+ // Return a pointer to a slice instead of a slice itself
+ // to avoid allocations on every call.
+ return &s
+ },
+ }
+
+ addOptPool = &sync.Pool{
+ New: func() any {
+ const n = 1 // WithAttributeSet
+ o := make([]metric.AddOption, 0, n)
+ return &o
+ },
+ }
+
+ recordOptPool = &sync.Pool{
+ New: func() any {
+ const n = 1 // WithAttributeSet
+ o := make([]metric.RecordOption, 0, n)
+ return &o
+ },
+ }
+)
+
+func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) }
+
+func put[T any](p *sync.Pool, s *[]T) {
+ *s = (*s)[:0] // Reset.
+ p.Put(s)
+}
+
+// ComponentName returns the component name for the exporter with the
+// provided ID.
+func ComponentName(id int64) string {
+ t := semconv.OTelComponentTypeOtlpGRPCSpanExporter.Value.AsString()
+ return fmt.Sprintf("%s/%d", t, id)
+}
+
+// Instrumentation is experimental instrumentation for the exporter.
+type Instrumentation struct {
+ inflightSpans metric.Int64UpDownCounter
+ exportedSpans metric.Int64Counter
+ opDuration metric.Float64Histogram
+
+ attrs []attribute.KeyValue
+ addOpt metric.AddOption
+ recOpt metric.RecordOption
+}
+
+// NewInstrumentation returns instrumentation for an OTLP over gRPC trace
+// exporter with the provided ID using the global MeterProvider.
+//
+// The id should be the unique exporter instance ID. It is used
+// to set the "component.name" attribute.
+//
+// The target is the endpoint the exporter is exporting to.
+//
+// If the experimental observability is disabled, nil is returned.
+func NewInstrumentation(id int64, target string) (*Instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ attrs := BaseAttrs(id, target)
+ i := &Instrumentation{
+ attrs: attrs,
+ addOpt: metric.WithAttributeSet(attribute.NewSet(attrs...)),
+
+ // Do not modify attrs (NewSet sorts in-place); make a new slice.
+ recOpt: metric.WithAttributeSet(attribute.NewSet(append(
+ // Default to OK status code.
+ []attribute.KeyValue{
+ semconv.RPCResponseStatusCode(codes.OK.String()),
+ },
+ attrs...,
+ )...)),
+ }
+
+ mp := otel.GetMeterProvider()
+ m := mp.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version),
+ metric.WithSchemaURL(SchemaURL),
+ )
+
+ var err error
+
+ inflightSpans, e := otelconv.NewSDKExporterSpanInflight(m)
+ if e != nil {
+ e = fmt.Errorf("failed to create span inflight metric: %w", e)
+ err = errors.Join(err, e)
+ }
+ i.inflightSpans = inflightSpans.Inst()
+
+ exportedSpans, e := otelconv.NewSDKExporterSpanExported(m)
+ if e != nil {
+ e = fmt.Errorf("failed to create span exported metric: %w", e)
+ err = errors.Join(err, e)
+ }
+ i.exportedSpans = exportedSpans.Inst()
+
+ opDuration, e := otelconv.NewSDKExporterOperationDuration(m)
+ if e != nil {
+ e = fmt.Errorf("failed to create operation duration metric: %w", e)
+ err = errors.Join(err, e)
+ }
+ i.opDuration = opDuration.Inst()
+
+ return i, err
+}
+
+// BaseAttrs returns the base attributes for the exporter with the provided ID
+// and target.
+//
+// The id should be the unique exporter instance ID. It is used
+// to set the "component.name" attribute.
+//
+// The target is the gRPC target the exporter is exporting to. It is expected
+// to be the output of the Client's CanonicalTarget method.
+func BaseAttrs(id int64, target string) []attribute.KeyValue {
+ host, port, err := ParseCanonicalTarget(target)
+ if err != nil || (host == "" && port < 0) {
+ if err != nil {
+ global.Debug("failed to parse target", "target", target, "error", err)
+ }
+ return []attribute.KeyValue{
+ semconv.OTelComponentName(ComponentName(id)),
+ semconv.OTelComponentTypeOtlpGRPCSpanExporter,
+ }
+ }
+
+ // Do not use append so the slice is exactly allocated.
+
+ if port < 0 {
+ return []attribute.KeyValue{
+ semconv.OTelComponentName(ComponentName(id)),
+ semconv.OTelComponentTypeOtlpGRPCSpanExporter,
+ semconv.ServerAddress(host),
+ }
+ }
+
+ if host == "" {
+ return []attribute.KeyValue{
+ semconv.OTelComponentName(ComponentName(id)),
+ semconv.OTelComponentTypeOtlpGRPCSpanExporter,
+ semconv.ServerPort(port),
+ }
+ }
+
+ return []attribute.KeyValue{
+ semconv.OTelComponentName(ComponentName(id)),
+ semconv.OTelComponentTypeOtlpGRPCSpanExporter,
+ semconv.ServerAddress(host),
+ semconv.ServerPort(port),
+ }
+}
+
+// ExportSpans instruments the ExportSpans method of the exporter. It returns
+// an [ExportOp] that must have its [ExportOp.End] method called when the
+// ExportSpans method returns.
+func (i *Instrumentation) ExportSpans(ctx context.Context, nSpans int) ExportOp {
+ start := time.Now()
+
+ addOpt := get[metric.AddOption](addOptPool)
+ defer put(addOptPool, addOpt)
+ *addOpt = append(*addOpt, i.addOpt)
+ i.inflightSpans.Add(ctx, int64(nSpans), *addOpt...)
+
+ return ExportOp{
+ ctx: ctx,
+ start: start,
+ nSpans: int64(nSpans),
+ inst: i,
+ }
+}
+
+// ExportOp tracks the operation being observed by [Instrumentation.ExportSpans].
+type ExportOp struct {
+ ctx context.Context
+ start time.Time
+ nSpans int64
+
+ inst *Instrumentation
+}
+
+// End completes the observation of the operation being observed by a call to
+// [Instrumentation.ExportSpans].
+//
+// Any error that is encountered is provided as err.
+//
+// If err is not nil, all spans will be recorded as failures unless the error is of
+// type [internal.PartialSuccess]. In the case of a PartialSuccess, the number
+// of successfully exported spans will be determined by inspecting the
+// RejectedItems field of the PartialSuccess.
+func (e ExportOp) End(err error, code codes.Code) {
+ addOpt := get[metric.AddOption](addOptPool)
+ defer put(addOptPool, addOpt)
+ *addOpt = append(*addOpt, e.inst.addOpt)
+
+ e.inst.inflightSpans.Add(e.ctx, -e.nSpans, *addOpt...)
+
+ success := successful(e.nSpans, err)
+ // Record successfully exported spans, even when the value is 0, as zero
+ // values are meaningful to distribution aggregations.
+ e.inst.exportedSpans.Add(e.ctx, success, *addOpt...)
+
+ if err != nil {
+ attrs := get[attribute.KeyValue](measureAttrsPool)
+ defer put(measureAttrsPool, attrs)
+ *attrs = append(*attrs, e.inst.attrs...)
+ *attrs = append(*attrs, semconv.ErrorType(err))
+
+ // Do not inefficiently make a copy of attrs by using
+ // WithAttributes instead of WithAttributeSet.
+ o := metric.WithAttributeSet(attribute.NewSet(*attrs...))
+ // Reset addOpt with new attribute set.
+ *addOpt = append((*addOpt)[:0], o)
+
+ e.inst.exportedSpans.Add(e.ctx, e.nSpans-success, *addOpt...)
+ }
+
+ recOpt := get[metric.RecordOption](recordOptPool)
+ defer put(recordOptPool, recOpt)
+ *recOpt = append(*recOpt, e.inst.recordOption(err, code))
+
+ d := time.Since(e.start).Seconds()
+ e.inst.opDuration.Record(e.ctx, d, *recOpt...)
+}
+
+// recordOption returns a RecordOption with attributes representing the
+// outcome of the operation being recorded.
+//
+// If err is nil and code is codes.OK, the default recOpt of the
+// Instrumentation is returned.
+//
+// If err is not nil or code is not codes.OK, a new RecordOption is returned
+// with the base attributes of the Instrumentation plus the rpc.grpc.status_code
+// attribute set to the provided code, and if err is not nil, the error.type
+// attribute set to the type of the error.
+func (i *Instrumentation) recordOption(err error, code codes.Code) metric.RecordOption {
+ if err == nil && code == codes.OK {
+ return i.recOpt
+ }
+
+ attrs := get[attribute.KeyValue](measureAttrsPool)
+ defer put(measureAttrsPool, attrs)
+ *attrs = append(*attrs, i.attrs...)
+
+ *attrs = append(*attrs, semconv.RPCResponseStatusCode(code.String()))
+ if err != nil {
+ *attrs = append(*attrs, semconv.ErrorType(err))
+ }
+
+ // Do not inefficiently make a copy of attrs by using WithAttributes
+ // instead of WithAttributeSet.
+ return metric.WithAttributeSet(attribute.NewSet(*attrs...))
+}
+
+// successful returns the number of successfully exported spans out of the n
+// that were exported based on the provided error.
+//
+// If err is nil, n is returned. All spans were successfully exported.
+//
+// If err is not nil and not an [internal.PartialSuccess] error, 0 is returned.
+// It is assumed all spans failed to be exported.
+//
+// If err is an [internal.PartialSuccess] error, the number of successfully
+// exported spans is computed by subtracting the RejectedItems field from n. If
+// RejectedItems is negative, n is returned. If RejectedItems is greater than
+// n, 0 is returned.
+func successful(n int64, err error) int64 {
+ if err == nil {
+ return n // All spans successfully exported.
+ }
+ // Split rejection calculation so successful is inlinable.
+ return n - rejected(n, err)
+}
+
+var errPartialPool = &sync.Pool{
+ New: func() any { return new(internal.PartialSuccess) },
+}
+
+// rejected returns how many of the n exported spans were rejected based on
+// the provided non-nil err.
+func rejected(n int64, err error) int64 {
+ ps := errPartialPool.Get().(*internal.PartialSuccess)
+ defer errPartialPool.Put(ps)
+ // Check for partial success.
+ if errors.As(err, ps) {
+ // Bound RejectedItems to [0, n]. This should not be needed,
+ // but be defensive as this is from an external source.
+ return min(max(ps.RejectedItems, 0), n)
+ }
+ return n // All spans rejected.
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go
new file mode 100644
index 00000000..34eee27d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/target.go
@@ -0,0 +1,143 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/observ/target.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package observ // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ"
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "strconv"
+ "strings"
+)
+
+const (
+ schemeUnix = "unix"
+ schemeUnixAbstract = "unix-abstract"
+)
+
+// ParseCanonicalTarget parses a target string and returns the extracted host
+// (domain address or IP), the target port, or an error.
+//
+// If no port is specified, -1 is returned.
+//
+// If no host is specified, an empty string is returned.
+//
+// The target string is expected to always have the form
+// "://[authority]/". For example:
+// - "dns:///example.com:42"
+// - "dns://8.8.8.8/example.com:42"
+// - "unix:///path/to/socket"
+// - "unix-abstract:///socket-name"
+// - "passthrough:///192.34.2.1:42"
+//
+// The target is expected to come from the CanonicalTarget method of a gRPC
+// Client.
+func ParseCanonicalTarget(target string) (string, int, error) {
+ const sep = "://"
+
+ // Find scheme. Do not allocate the string by using url.Parse.
+ idx := strings.Index(target, sep)
+ if idx == -1 {
+ return "", -1, fmt.Errorf("invalid target %q: missing scheme", target)
+ }
+ scheme, endpoint := target[:idx], target[idx+len(sep):]
+
+ // Check for unix schemes.
+ if scheme == schemeUnix || scheme == schemeUnixAbstract {
+ return parseUnix(endpoint)
+ }
+
+ // Strip leading slash and any authority.
+ if i := strings.Index(endpoint, "/"); i != -1 {
+ endpoint = endpoint[i+1:]
+ }
+
+ // DNS, passthrough, and custom resolvers.
+ return parseEndpoint(endpoint)
+}
+
+// parseUnix parses unix socket targets.
+func parseUnix(endpoint string) (string, int, error) {
+ // Format: unix[-abstract]://path
+ //
+ // We should have "/path" (empty authority) if valid.
+ if len(endpoint) >= 1 && endpoint[0] == '/' {
+ // Return the full path including leading slash.
+ return endpoint, -1, nil
+ }
+
+ // If there's no leading slash, there might be an authority.
+ // Check for the authority case (which should error): "authority/path".
+ if slashIdx := strings.Index(endpoint, "/"); slashIdx > 0 {
+ return "", -1, fmt.Errorf("invalid (non-empty) authority: %s", endpoint[:slashIdx])
+ }
+
+ return "", -1, errors.New("invalid unix target format")
+}
+
+// parseEndpoint parses an endpoint from a gRPC target.
+//
+// It supports the following formats:
+// - "host"
+// - "host%zone"
+// - "host:port"
+// - "host%zone:port"
+// - "ipv4"
+// - "ipv4%zone"
+// - "ipv4:port"
+// - "ipv4%zone:port"
+// - "ipv6"
+// - "ipv6%zone"
+// - "[ipv6]"
+// - "[ipv6%zone]"
+// - "[ipv6]:port"
+// - "[ipv6%zone]:port"
+//
+// It returns the host or host%zone (domain address or IP), the port (or -1 if
+// not specified), or an error if the input is not valid.
+func parseEndpoint(endpoint string) (string, int, error) {
+ // First check if the endpoint is just an IP address.
+ if ip := parseIP(endpoint); ip != "" {
+ return ip, -1, nil
+ }
+
+ // If there's no colon, there is no port (IPv6 with no port checked above).
+ if !strings.Contains(endpoint, ":") {
+ return endpoint, -1, nil
+ }
+
+ host, portStr, err := net.SplitHostPort(endpoint)
+ if err != nil {
+ return "", -1, fmt.Errorf("invalid host:port %q: %w", endpoint, err)
+ }
+
+ const base, bitSize = 10, 16
+ port16, err := strconv.ParseUint(portStr, base, bitSize)
+ if err != nil {
+ return "", -1, fmt.Errorf("invalid port %q: %w", portStr, err)
+ }
+ port := int(port16) // port is guaranteed to be in the range [0, 65535].
+
+ return host, port, nil
+}
+
+// parseIP attempts to parse the entire endpoint as an IP address.
+// It returns the normalized string form of the IP if successful,
+// or an empty string if parsing fails.
+func parseIP(ip string) string {
+ // Strip leading and trailing brackets for IPv6 addresses.
+ if len(ip) >= 2 && ip[0] == '[' && ip[len(ip)-1] == ']' {
+ ip = ip[1 : len(ip)-1]
+ }
+ addr, err := netip.ParseAddr(ip)
+ if err != nil {
+ return ""
+ }
+ // Return the normalized string form of the IP.
+ return addr.String()
+}
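
A quick sanity sketch of what the parser above should return for typical gRPC canonical targets; the package is internal, so this only compiles inside the module, and the expected results in the comments are inferred from the code above rather than from upstream documentation:

host, port, err := observ.ParseCanonicalTarget("dns:///collector.example:4317")
// host = "collector.example", port = 4317, err = nil

host, port, err = observ.ParseCanonicalTarget("unix:///var/run/otel.sock")
// host = "/var/run/otel.sock", port = -1 (no port), err = nil

host, port, err = observ.ParseCanonicalTarget("passthrough:///[::1]:4317")
// host = "::1", port = 4317, err = nil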
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
index 1c465942..a811a07b 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -29,6 +29,17 @@ func (ps PartialSuccess) Error() string {
return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
}
+// As returns true if ps can be assigned to target and makes the assignment.
+// Otherwise, it returns false. This supports the errors.As() interface.
+func (ps PartialSuccess) As(target any) bool {
+ t, ok := target.(*PartialSuccess)
+ if !ok {
+ return false
+ }
+ *t = ps
+ return true
+}
+
// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
_, ok := err.(PartialSuccess)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
index 259a898a..a7b8e81a 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -94,6 +94,11 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
return err
}
+ // Check if context is canceled before attempting to wait and retry.
+ if ctx.Err() != nil {
+ return fmt.Errorf("%w: %w", ctx.Err(), err)
+ }
+
if maxElapsedTime != 0 && time.Since(startTime) > maxElapsedTime {
return fmt.Errorf("max retry time elapsed: %w", err)
}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go
new file mode 100644
index 00000000..19fac1b7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/version.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+// Version is the current release version of the OpenTelemetry OTLP gRPC trace
+// exporter in use.
+const Version = "1.40.0"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md
similarity index 54%
rename from vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md
index feec16fa..15a3011b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/README.md
@@ -1,33 +1,34 @@
# Experimental Features
-The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification.
-These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+The `otlptracegrpc` exporter contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the `otlptracegrpc` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
-These features may change in backwards incompatible ways as feedback is applied.
+These features may change in backwards incompatible ways as feedback is applied.
See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
## Features
-- [Self-Observability](#self-observability)
+- [Observability](#observability)
-### Self-Observability
+### Observability
-The SDK provides a self-observability feature that allows you to monitor the SDK itself.
+The `otlptracegrpc` exporter provides an observability feature that allows you to monitor the exporter itself.
-To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.
+To opt-in, set the environment variable `OTEL_GO_X_OBSERVABILITY` to `true`.
When enabled, the SDK will create the following metrics using the global `MeterProvider`:
-- `otel.sdk.span.live`
-- `otel.sdk.span.started`
+- `otel.sdk.exporter.span.inflight`
+- `otel.sdk.exporter.span.exported`
+- `otel.sdk.exporter.operation.duration`
Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics.
-[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md
+[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.37.0/docs/otel/sdk-metrics.md
## Compatibility and Stability
-Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../../VERSIONING.md).
These features may be removed or modified in successive version releases, including patch versions.
When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
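
The new exporter metrics are an environment-variable opt-in and are recorded through the global MeterProvider. A minimal sketch, assuming OTEL_GO_X_OBSERVABILITY=true is set in the environment and using an illustrative collector endpoint:

exp, err := otlptracegrpc.New(ctx,
	otlptracegrpc.WithEndpoint("collector.example:4317"),
	otlptracegrpc.WithInsecure(),
)
if err != nil {
	// Handle the setup error.
}
_ = exp
// With the flag set, span exports record otel.sdk.exporter.span.inflight,
// otel.sdk.exporter.span.exported, and otel.sdk.exporter.operation.duration
// through otel.GetMeterProvider().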
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go
new file mode 100644
index 00000000..4e89c652
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/observ.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x"
+
+import "strings"
+
+// Observability is an experimental feature flag that determines if exporter
+// observability metrics are enabled.
+//
+// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Observability = newFeature(
+ []string{"OBSERVABILITY"},
+ func(v string) (string, bool) {
+ if strings.EqualFold(v, "true") {
+ return v, true
+ }
+ return "", false
+ },
+)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go
similarity index 55%
rename from vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go
index 2fcbbcc6..741ba62c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x/x.go
@@ -1,45 +1,38 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/x/x.go.tmpl
+
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace].
-package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x"
+// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc].
+package x // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x"
import (
"os"
- "strings"
)
-// SelfObservability is an experimental feature flag that determines if SDK
-// self-observability metrics are enabled.
-//
-// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable
-// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
-// will also enable this).
-var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
- if strings.EqualFold(v, "true") {
- return v, true
- }
- return "", false
-})
-
// Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
type Feature[T any] struct {
- key string
+ keys []string
parse func(v string) (T, bool)
}
-func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] {
const envKeyRoot = "OTEL_GO_X_"
+ keys := make([]string, 0, len(suffix))
+ for _, s := range suffix {
+ keys = append(keys, envKeyRoot+s)
+ }
return Feature[T]{
- key: envKeyRoot + suffix,
+ keys: keys,
parse: parse,
}
}
-// Key returns the environment variable key that needs to be set to enable the
+// Keys returns the environment variable keys that can be set to enable the
// feature.
-func (f Feature[T]) Key() string { return f.key }
+func (f Feature[T]) Keys() []string { return f.keys }
// Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a
@@ -49,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
//
// > The SDK MUST interpret an empty value of an environment variable the
// > same way as when the variable is unset.
- vRaw := os.Getenv(f.key)
- if vRaw == "" {
- return v, ok
+ for _, key := range f.keys {
+ vRaw := os.Getenv(key)
+ if vRaw != "" {
+ return f.parse(vRaw)
+ }
}
- return f.parse(vRaw)
+ return v, ok
}
// Enabled reports whether the feature is enabled.
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
index 3b79c1a0..370cd2a6 100644
--- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string {
- return "1.38.0"
+ return "1.40.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
index ae92a425..55255cdd 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -229,6 +229,13 @@ func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOpt
}
}
+func (i *sfCounter) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64Counter).Enabled(ctx)
+ }
+ return false
+}
+
type sfUpDownCounter struct {
embedded.Float64UpDownCounter
@@ -255,6 +262,13 @@ func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.
}
}
+func (i *sfUpDownCounter) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64UpDownCounter).Enabled(ctx)
+ }
+ return false
+}
+
type sfHistogram struct {
embedded.Float64Histogram
@@ -281,6 +295,13 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco
}
}
+func (i *sfHistogram) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64Histogram).Enabled(ctx)
+ }
+ return false
+}
+
type sfGauge struct {
embedded.Float64Gauge
@@ -307,6 +328,13 @@ func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOp
}
}
+func (i *sfGauge) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64Gauge).Enabled(ctx)
+ }
+ return false
+}
+
type siCounter struct {
embedded.Int64Counter
@@ -333,6 +361,13 @@ func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption)
}
}
+func (i *siCounter) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64Counter).Enabled(ctx)
+ }
+ return false
+}
+
type siUpDownCounter struct {
embedded.Int64UpDownCounter
@@ -359,6 +394,13 @@ func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOp
}
}
+func (i *siUpDownCounter) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64UpDownCounter).Enabled(ctx)
+ }
+ return false
+}
+
type siHistogram struct {
embedded.Int64Histogram
@@ -385,6 +427,13 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record
}
}
+func (i *siHistogram) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64Histogram).Enabled(ctx)
+ }
+ return false
+}
+
type siGauge struct {
embedded.Int64Gauge
@@ -410,3 +459,10 @@ func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOpti
ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
}
}
+
+func (i *siGauge) Enabled(ctx context.Context) bool {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64Gauge).Enabled(ctx)
+ }
+ return false
+}
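
These Enabled methods follow the global package's delegation pattern: until an application registers a real MeterProvider, the delegate is nil and Enabled conservatively reports false. A minimal sketch of what that means for callers of the global API (the scope and instrument names here are illustrative, not taken from this diff):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

func main() {
	ctx := context.Background()
	// Instruments created before otel.SetMeterProvider is called are backed by
	// the delegating implementations in internal/global shown above.
	ctr, err := otel.Meter("example").Int64Counter("jobs.processed")
	if err != nil {
		panic(err)
	}
	// No delegate is installed yet, so Enabled reports false and guarded work
	// can be skipped; once a real provider is registered the call forwards to it.
	fmt.Println(ctr.Enabled(ctx)) // false
}
```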
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index adb37b5b..50043d66 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -105,7 +105,7 @@ type delegatedInstrument interface {
setDelegate(metric.Meter)
}
-// instID are the identifying properties of a instrument.
+// instID are the identifying properties of an instrument.
type instID struct {
// name is the name of the stream.
name string
@@ -157,7 +157,7 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption)
cfg := metric.NewInt64CounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*siCounter)(nil)),
+ kind: reflect.TypeFor[*siCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -183,7 +183,7 @@ func (m *meter) Int64UpDownCounter(
cfg := metric.NewInt64UpDownCounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*siUpDownCounter)(nil)),
+ kind: reflect.TypeFor[*siUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -206,7 +206,7 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
cfg := metric.NewInt64HistogramConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*siHistogram)(nil)),
+ kind: reflect.TypeFor[*siHistogram](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -229,7 +229,7 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met
cfg := metric.NewInt64GaugeConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*siGauge)(nil)),
+ kind: reflect.TypeFor[*siGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -255,7 +255,7 @@ func (m *meter) Int64ObservableCounter(
cfg := metric.NewInt64ObservableCounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*aiCounter)(nil)),
+ kind: reflect.TypeFor[*aiCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -281,7 +281,7 @@ func (m *meter) Int64ObservableUpDownCounter(
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
+ kind: reflect.TypeFor[*aiUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -307,7 +307,7 @@ func (m *meter) Int64ObservableGauge(
cfg := metric.NewInt64ObservableGaugeConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*aiGauge)(nil)),
+ kind: reflect.TypeFor[*aiGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -330,7 +330,7 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti
cfg := metric.NewFloat64CounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*sfCounter)(nil)),
+ kind: reflect.TypeFor[*sfCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -356,7 +356,7 @@ func (m *meter) Float64UpDownCounter(
cfg := metric.NewFloat64UpDownCounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
+ kind: reflect.TypeFor[*sfUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -382,7 +382,7 @@ func (m *meter) Float64Histogram(
cfg := metric.NewFloat64HistogramConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*sfHistogram)(nil)),
+ kind: reflect.TypeFor[*sfHistogram](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -405,7 +405,7 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption)
cfg := metric.NewFloat64GaugeConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*sfGauge)(nil)),
+ kind: reflect.TypeFor[*sfGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -431,7 +431,7 @@ func (m *meter) Float64ObservableCounter(
cfg := metric.NewFloat64ObservableCounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*afCounter)(nil)),
+ kind: reflect.TypeFor[*afCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -457,7 +457,7 @@ func (m *meter) Float64ObservableUpDownCounter(
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*afUpDownCounter)(nil)),
+ kind: reflect.TypeFor[*afUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@@ -483,7 +483,7 @@ func (m *meter) Float64ObservableGauge(
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
id := instID{
name: name,
- kind: reflect.TypeOf((*afGauge)(nil)),
+ kind: reflect.TypeFor[*afGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
index 1e6473b3..527d9aec 100644
--- a/vendor/go.opentelemetry.io/otel/metric.go
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -11,7 +11,7 @@ import (
// Meter returns a Meter from the global MeterProvider. The name must be the
// name of the library providing instrumentation. This name may be the same as
// the instrumented code only if that code provides built-in instrumentation.
-// If the name is empty, then a implementation defined default name will be
+// If the name is empty, then an implementation defined default name will be
// used instead.
//
// If this is called before a global MeterProvider is registered the returned
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
index b7fc973a..eb4f5961 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -227,7 +227,11 @@ type Float64Observer interface {
// attributes as another Float64Callbacks also registered for the same
// instrument.
//
-// The function needs to be concurrent safe.
+// The function needs to be reentrant and concurrent safe.
+//
+// Note that Go's mutexes are not reentrant, and locking a mutex takes
+// an indefinite amount of time. It is therefore advised to avoid
+// using mutexes inside callbacks.
type Float64Callback func(context.Context, Float64Observer) error
// Float64ObservableOption applies options to float64 Observer instruments.
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
index 4404b71a..1dfc4b0f 100644
--- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -225,7 +225,11 @@ type Int64Observer interface {
// attributes as another Int64Callbacks also registered for the same
// instrument.
//
-// The function needs to be concurrent safe.
+// The function needs to be reentrant and concurrent safe.
+//
+// Note that Go's mutexes are not reentrant, and locking a mutex takes
+// an indefinite amount of time. It is therefore advised to avoid
+// using mutexes inside callbacks.
type Int64Callback func(context.Context, Int64Observer) error
// Int64ObservableOption applies options to int64 Observer instruments.
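
The strengthened requirement that callbacks be reentrant and avoid mutexes suggests keeping observed state in atomics rather than behind locks. A minimal sketch under that assumption (the gauge name and the queueDepth variable are illustrative):

```go
package main

import (
	"context"
	"sync/atomic"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

var queueDepth atomic.Int64 // updated by the application, read by the callback

func register() error {
	meter := otel.Meter("example")
	// The callback only performs an atomic load, so it is reentrant and never
	// blocks on a lock, as the added documentation above advises.
	_, err := meter.Int64ObservableGauge(
		"queue.depth",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(queueDepth.Load())
			return nil
		}),
	)
	return err
}
```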
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
index d9e3b13e..e42dd6e7 100644
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -3,7 +3,11 @@
package metric // import "go.opentelemetry.io/otel/metric"
-import "go.opentelemetry.io/otel/attribute"
+import (
+ "slices"
+
+ "go.opentelemetry.io/otel/attribute"
+)
// MeterConfig contains options for Meters.
type MeterConfig struct {
@@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption {
})
}
-// WithInstrumentationAttributes sets the instrumentation attributes.
+// WithInstrumentationAttributes adds the instrumentation attributes.
+//
+// This is equivalent to calling [WithInstrumentationAttributeSet] with an
+// [attribute.Set] created from a clone of the passed attributes.
+// [WithInstrumentationAttributeSet] is recommended for more control.
//
-// The passed attributes will be de-duplicated.
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+ set := attribute.NewSet(slices.Clone(attr)...)
+ return WithInstrumentationAttributeSet(set)
+}
+
+// WithInstrumentationAttributeSet adds the instrumentation attributes.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
+func WithInstrumentationAttributeSet(set attribute.Set) MeterOption {
+ if set.Len() == 0 {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ return config
+ })
+ }
+
return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.attrs = attribute.NewSet(attr...)
+ if config.attrs.Len() == 0 {
+ config.attrs = set
+ } else {
+ config.attrs = mergeSets(config.attrs, set)
+ }
return config
})
}
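
With this change multiple attribute options merge rather than replace one another, and duplicate keys take the last value passed. A small usage sketch against the public API (the mergeSets helper referenced above is defined elsewhere in this file and is not shown in the hunk):

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func newMeter() metric.Meter {
	base := attribute.NewSet(attribute.String("tenant", "a"), attribute.String("region", "eu"))
	// Both options contribute attributes; the later "tenant" value overrides
	// the earlier one because duplicate keys use the last value passed.
	return otel.Meter(
		"example",
		metric.WithInstrumentationAttributeSet(base),
		metric.WithInstrumentationAttributes(attribute.String("tenant", "b")),
	)
}
```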
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index fdd2a701..a16c4c0a 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -244,7 +244,11 @@ type Meter interface {
// Callbacks. Meaning, it should not report measurements for an instrument with
// the same attributes as another Callback will report.
//
-// The function needs to be concurrent safe.
+// The function needs to be reentrant and concurrent safe.
+//
+// Note that Go's mutexes are not reentrant, and locking a mutex takes
+// an indefinite amount of time. It is therefore advised to avoid
+// using mutexes inside callbacks.
type Callback func(context.Context, Observer) error
// Observer records measurements for multiple instruments in a Callback.
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
index 9afb69e5..634e73ae 100644
--- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -191,6 +191,9 @@ type Int64Counter struct{ embedded.Int64Counter }
// Add performs no operation.
func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+// Enabled performs no operation.
+func (Int64Counter) Enabled(context.Context) bool { return false }
+
// Float64Counter is an OpenTelemetry Counter used to record float64
// measurements. It produces no telemetry.
type Float64Counter struct{ embedded.Float64Counter }
@@ -198,6 +201,9 @@ type Float64Counter struct{ embedded.Float64Counter }
// Add performs no operation.
func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+// Enabled performs no operation.
+func (Float64Counter) Enabled(context.Context) bool { return false }
+
// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
// measurements. It produces no telemetry.
type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
@@ -205,6 +211,9 @@ type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
// Add performs no operation.
func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+// Enabled performs no operation.
+func (Int64UpDownCounter) Enabled(context.Context) bool { return false }
+
// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
// float64 measurements. It produces no telemetry.
type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
@@ -212,6 +221,9 @@ type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
// Add performs no operation.
func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+// Enabled performs no operation.
+func (Float64UpDownCounter) Enabled(context.Context) bool { return false }
+
// Int64Histogram is an OpenTelemetry Histogram used to record int64
// measurements. It produces no telemetry.
type Int64Histogram struct{ embedded.Int64Histogram }
@@ -219,6 +231,9 @@ type Int64Histogram struct{ embedded.Int64Histogram }
// Record performs no operation.
func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+// Enabled performs no operation.
+func (Int64Histogram) Enabled(context.Context) bool { return false }
+
// Float64Histogram is an OpenTelemetry Histogram used to record float64
// measurements. It produces no telemetry.
type Float64Histogram struct{ embedded.Float64Histogram }
@@ -226,6 +241,9 @@ type Float64Histogram struct{ embedded.Float64Histogram }
// Record performs no operation.
func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+// Enabled performs no operation.
+func (Float64Histogram) Enabled(context.Context) bool { return false }
+
// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
// measurements. It produces no telemetry.
type Int64Gauge struct{ embedded.Int64Gauge }
@@ -233,6 +251,9 @@ type Int64Gauge struct{ embedded.Int64Gauge }
// Record performs no operation.
func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+// Enabled performs no operation.
+func (Int64Gauge) Enabled(context.Context) bool { return false }
+
// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
// measurements. It produces no telemetry.
type Float64Gauge struct{ embedded.Float64Gauge }
@@ -240,6 +261,9 @@ type Float64Gauge struct{ embedded.Float64Gauge }
// Record performs no operation.
func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+// Enabled performs no operation.
+func (Float64Gauge) Enabled(context.Context) bool { return false }
+
// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
// int64 measurements. It produces no telemetry.
type Int64ObservableCounter struct {
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
index 8403a4ba..57a74c5e 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -25,6 +25,12 @@ type Float64Counter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Float64CounterConfig contains options for synchronous counter instruments that
@@ -78,6 +84,12 @@ type Float64UpDownCounter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Float64UpDownCounterConfig contains options for synchronous counter
@@ -131,6 +143,12 @@ type Float64Histogram interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr float64, options ...RecordOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Float64HistogramConfig contains options for synchronous histogram
@@ -189,6 +207,12 @@ type Float64Gauge interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, value float64, options ...RecordOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Float64GaugeConfig contains options for synchronous gauge instruments that
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
index 783fdfba..ac2d033e 100644
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -25,6 +25,12 @@ type Int64Counter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Int64CounterConfig contains options for synchronous counter instruments that
@@ -78,6 +84,12 @@ type Int64UpDownCounter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Int64UpDownCounterConfig contains options for synchronous counter
@@ -131,6 +143,12 @@ type Int64Histogram interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr int64, options ...RecordOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Int64HistogramConfig contains options for synchronous histogram instruments
@@ -189,6 +207,12 @@ type Int64Gauge interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, value int64, options ...RecordOption)
+
+ // Enabled reports whether the instrument will process measurements for the given context.
+ //
+ // This function can be used in places where measuring an instrument
+ // would result in computationally expensive operations.
+ Enabled(context.Context) bool
}
// Int64GaugeConfig contains options for synchronous gauge instruments that
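
The new Enabled accessor lets callers skip expensive measurement preparation when an instrument will not process the value, as the added doc comments describe. A minimal sketch (everything except the metric API calls is illustrative):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
)

// recordPayloadSize only computes the payload size when the histogram will
// actually process the measurement, avoiding the cost otherwise.
func recordPayloadSize(ctx context.Context, hist metric.Int64Histogram, payload any) {
	if !hist.Enabled(ctx) {
		return
	}
	hist.Record(ctx, expensiveSizeOf(payload))
}

func expensiveSizeOf(_ any) int64 { return 0 } // hypothetical costly computation
```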
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 6692d266..271ab71f 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -111,7 +111,7 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
}
// Clear all flags other than the trace-context supported sampling bit.
- scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+ scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked.
// Ignore the error returned here. Failure to parse tracestate MUST NOT
// affect the parsing of traceparent according to the W3C tracecontext
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
new file mode 100644
index 00000000..bfeb73e8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/features.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import "strings"
+
+// Resource is an experimental feature flag that defines if resource detectors
+// should be included experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature(
+ []string{"RESOURCE"},
+ func(v string) (string, bool) {
+ if strings.EqualFold(v, "true") {
+ return v, true
+ }
+ return "", false
+ },
+)
+
+// Observability is an experimental feature flag that determines if SDK
+// observability metrics are enabled.
+//
+// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Observability = newFeature(
+ []string{"OBSERVABILITY", "SELF_OBSERVABILITY"},
+ func(v string) (string, bool) {
+ if strings.EqualFold(v, "true") {
+ return v, true
+ }
+ return "", false
+ },
+)
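
Because Observability lists two key suffixes, either OTEL_GO_X_OBSERVABILITY or the legacy OTEL_GO_X_SELF_OBSERVABILITY enables the feature, and the Lookup loop in the x.go change below checks keys in order, so the first variable with a non-empty value wins. A standalone sketch of that behavior, not the internal implementation itself:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// enabled mirrors the multi-key lookup: keys are checked in order, the first
// non-empty value is parsed, and empty values are treated the same as unset.
func enabled(keys ...string) bool {
	for _, k := range keys {
		if v := os.Getenv(k); v != "" {
			return strings.EqualFold(v, "true")
		}
	}
	return false
}

func main() {
	_ = os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "TRUE") // legacy name still works
	fmt.Println(enabled("OTEL_GO_X_OBSERVABILITY", "OTEL_GO_X_SELF_OBSERVABILITY")) // true
}
```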
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
index 1be472e9..13347e56 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -1,48 +1,38 @@
+// Code generated by gotmpl. DO NOT MODIFY.
+// source: internal/shared/x/x.go.tmpl
+
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Package x contains support for OTel SDK experimental features.
-//
-// This package should only be used for features defined in the specification.
-// It should not be used for experiments or new project ideas.
+// Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
package x // import "go.opentelemetry.io/otel/sdk/internal/x"
import (
"os"
- "strings"
)
-// Resource is an experimental feature flag that defines if resource detectors
-// should be included experimental semantic conventions.
-//
-// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
-// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
-// will also enable this).
-var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
- if strings.EqualFold(v, "true") {
- return v, true
- }
- return "", false
-})
-
// Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values.
type Feature[T any] struct {
- key string
+ keys []string
parse func(v string) (T, bool)
}
-func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] {
const envKeyRoot = "OTEL_GO_X_"
+ keys := make([]string, 0, len(suffix))
+ for _, s := range suffix {
+ keys = append(keys, envKeyRoot+s)
+ }
return Feature[T]{
- key: envKeyRoot + suffix,
+ keys: keys,
parse: parse,
}
}
-// Key returns the environment variable key that needs to be set to enable the
+// Keys returns the environment variable keys that can be set to enable the
// feature.
-func (f Feature[T]) Key() string { return f.key }
+func (f Feature[T]) Keys() []string { return f.keys }
// Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a
@@ -52,11 +42,13 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
//
// > The SDK MUST interpret an empty value of an environment variable the
// > same way as when the variable is unset.
- vRaw := os.Getenv(f.key)
- if vRaw == "" {
- return v, ok
+ for _, key := range f.keys {
+ vRaw := os.Getenv(key)
+ if vRaw != "" {
+ return f.parse(vRaw)
+ }
}
- return f.parse(vRaw)
+ return v, ok
}
// Enabled reports whether the feature is enabled.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
index 0f3b9d62..dd75eefa 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
@@ -63,6 +63,22 @@
// issues for databases that cannot handle high cardinality.
// - A too low of a limit causes loss of attribute detail as more data falls into overflow.
//
+// # Ordering and Collection Guarantees
+//
+// For performance reasons, the SDK does not guarantee that the order in which
+// synchronous measurements are made to the SDK is reflected in the collected
+// metric data. This means that even when a single goroutine makes sequential
+// synchronous measurements, it is possible for a later measurement to be
+// included in the collected metric data when an earlier measurement is not.
+// This applies to measurements made to different instruments, or to different
+// attribute sets on the same instrument. Sequential measurements made to the
+// same instrument and with the same attributes are guaranteed to preserve
+// ordering with respect to collection.
+//
+// Additionally, the SDK does not guarantee that exemplars are always included
+// in the same batch of metric data as the measurement they are associated
+// with.
+//
// See [go.opentelemetry.io/otel/metric] for more information about
// the metric API.
//
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
index 08e8f68f..15d71389 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
@@ -7,9 +7,12 @@ import (
"context"
"math"
"math/rand/v2"
+ "sync"
+ "sync/atomic"
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/reservoir"
)
// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir].
@@ -24,7 +27,19 @@ func FixedSizeReservoirProvider(k int) ReservoirProvider {
// sample each one. If there are more than k, the Reservoir will then randomly
// sample all additional measurement with a decreasing probability.
func NewFixedSizeReservoir(k int) *FixedSizeReservoir {
- return newFixedSizeReservoir(newStorage(k))
+ if k < 0 {
+ k = 0
+ }
+ // Use math.MaxInt32 instead of math.MaxUint32 to prevent overflowing int
+ // on 32-bit systems.
+ if k > math.MaxInt32 {
+ k = math.MaxInt32
+ }
+ return &FixedSizeReservoir{
+ storage: newStorage(k),
+ // Above we ensure k is positive, and less than MaxInt32.
+ nextTracker: newNextTracker(uint32(k)), // nolint: gosec
+ }
}
var _ Reservoir = &FixedSizeReservoir{}
@@ -34,40 +49,9 @@ var _ Reservoir = &FixedSizeReservoir{}
// If there are more than k, the Reservoir will then randomly sample all
// additional measurement with a decreasing probability.
type FixedSizeReservoir struct {
+ reservoir.ConcurrentSafe
*storage
-
- // count is the number of measurement seen.
- count int64
- // next is the next count that will store a measurement at a random index
- // once the reservoir has been filled.
- next int64
- // w is the largest random number in a distribution that is used to compute
- // the next next.
- w float64
-}
-
-func newFixedSizeReservoir(s *storage) *FixedSizeReservoir {
- r := &FixedSizeReservoir{
- storage: s,
- }
- r.reset()
- return r
-}
-
-// randomFloat64 returns, as a float64, a uniform pseudo-random number in the
-// open interval (0.0,1.0).
-func (*FixedSizeReservoir) randomFloat64() float64 {
- // TODO: Use an algorithm that avoids rejection sampling. For example:
- //
- // const precision = 1 << 53 // 2^53
- // // Generate an integer in [1, 2^53 - 1]
- // v := rand.Uint64() % (precision - 1) + 1
- // return float64(v) / float64(precision)
- f := rand.Float64()
- for f == 0 {
- f = rand.Float64()
- }
- return f
+ *nextTracker
}
// Offer accepts the parameters associated with a measurement. The
@@ -123,23 +107,65 @@ func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a
// https://github.com/MrAlias/reservoir-sampling for a performance
// comparison of reservoir sampling algorithms.
- if int(r.count) < cap(r.store) {
- r.store[r.count] = newMeasurement(ctx, t, n, a)
- } else if r.count == r.next {
+ count, next := r.incrementCount()
+ if count < r.k {
+ r.store(ctx, int(count), t, n, a)
+ } else if count == next {
// Overwrite a random existing measurement with the one offered.
- idx := int(rand.Int64N(int64(cap(r.store))))
- r.store[idx] = newMeasurement(ctx, t, n, a)
+ idx := rand.IntN(int(r.k))
+ r.store(ctx, idx, t, n, a)
+ r.wMu.Lock()
+ defer r.wMu.Unlock()
+ newCount, newNext := r.loadCountAndNext()
+ if newNext < next || newCount < count {
+ // This Observe() raced with Collect(), and r.reset() has been
+ // called since r.incrementCount(). Skip the call to advance in
+ // this case because our exemplar may have been collected in the
+ // previous interval.
+ return
+ }
r.advance()
}
- r.count++
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) {
+ r.storage.Collect(dest)
+ // Call reset here even though it will reset r.count and restart the random
+ // number series. This will persist any old exemplars as long as no new
+ // measurements are offered, but it will also prioritize those new
+ // measurements that are made over the older collection cycle ones.
+ r.reset()
+}
+
+func newNextTracker(k uint32) *nextTracker {
+ nt := &nextTracker{k: k}
+ nt.reset()
+ return nt
+}
+
+type nextTracker struct {
+ // countAndNext holds the current counts in the lower 32 bits and the next
+ // value in the upper 32 bits.
+ countAndNext atomic.Uint64
+ // w is the largest random number in a distribution that is used to compute
+ // the next next.
+ w float64
+ // wMu ensures w is kept consistent with next during advance and reset.
+ wMu sync.Mutex
+ // k is the number of measurements that can be stored in the reservoir.
+ k uint32
}
// reset resets r to the initial state.
-func (r *FixedSizeReservoir) reset() {
+func (r *nextTracker) reset() {
+ r.wMu.Lock()
+ defer r.wMu.Unlock()
// This resets the number of exemplars known.
- r.count = 0
// Random index inserts should only happen after the storage is full.
- r.next = int64(cap(r.store))
+ r.setCountAndNext(0, r.k)
// Initial random number in the series used to generate r.next.
//
@@ -150,14 +176,40 @@ func (r *FixedSizeReservoir) reset() {
// This maps the uniform random number in (0,1) to a geometric distribution
// over the same interval. The mean of the distribution is inversely
// proportional to the storage capacity.
- r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
+ r.w = math.Exp(math.Log(randomFloat64()) / float64(r.k))
r.advance()
}
+// incrementCount increments the count. It returns the count before the
+// increment and the current next value.
+func (r *nextTracker) incrementCount() (uint32, uint32) {
+ n := r.countAndNext.Add(1)
+ // Both count and next are stored in the upper and lower 32 bits, and thus
+ // can't overflow.
+ return uint32(n&((1<<32)-1) - 1), uint32(n >> 32) // nolint: gosec
+}
+
+// incrementNext increments the next value.
+func (r *nextTracker) incrementNext(inc uint32) {
+ r.countAndNext.Add(uint64(inc) << 32)
+}
+
+// setCountAndNext sets the count and next values.
+func (r *nextTracker) setCountAndNext(count, next uint32) {
+ r.countAndNext.Store(uint64(next)<<32 + uint64(count))
+}
+
+func (r *nextTracker) loadCountAndNext() (uint32, uint32) {
+ n := r.countAndNext.Load()
+ // Both count and next are stored in the upper and lower 32 bits, and thus
+ // can't overflow.
+ return uint32(n&((1<<32)-1) - 1), uint32(n >> 32) // nolint: gosec
+}
+
// advance updates the count at which the offered measurement will overwrite an
// existing exemplar.
-func (r *FixedSizeReservoir) advance() {
+func (r *nextTracker) advance() {
// Calculate the next value in the random number series.
//
// The current value of r.w is based on the max of a distribution of random
@@ -170,7 +222,7 @@ func (r *FixedSizeReservoir) advance() {
// therefore the next r.w will be based on the same distribution (i.e.
// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
// computing the next random number `u` and take r.w as `w * u^(1/k)`.
- r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
+ r.w *= math.Exp(math.Log(randomFloat64()) / float64(r.k))
// Use the new random number in the series to calculate the count of the
// next measurement that will be stored.
//
@@ -181,17 +233,21 @@ func (r *FixedSizeReservoir) advance() {
//
// Important to note, the new r.next will always be at least 1 more than
// the last r.next.
- r.next += int64(math.Log(r.randomFloat64())/math.Log(1-r.w)) + 1
+ r.incrementNext(uint32(math.Log(randomFloat64())/math.Log(1-r.w)) + 1)
}
-// Collect returns all the held exemplars.
-//
-// The Reservoir state is preserved after this call.
-func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) {
- r.storage.Collect(dest)
- // Call reset here even though it will reset r.count and restart the random
- // number series. This will persist any old exemplars as long as no new
- // measurements are offered, but it will also prioritize those new
- // measurements that are made over the older collection cycle ones.
- r.reset()
+// randomFloat64 returns, as a float64, a uniform pseudo-random number in the
+// open interval (0.0,1.0).
+func randomFloat64() float64 {
+ // TODO: Use an algorithm that avoids rejection sampling. For example:
+ //
+ // const precision = 1 << 53 // 2^53
+ // // Generate an integer in [1, 2^53 - 1]
+ // v := rand.Uint64() % (precision - 1) + 1
+ // return float64(v) / float64(precision)
+ f := rand.Float64()
+ for f == 0 {
+ f = rand.Float64()
+ }
+ return f
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
index decab613..dacac3eb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
@@ -10,6 +10,7 @@ import (
"time"
"go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/internal/reservoir"
)
// HistogramReservoirProvider is a provider of [HistogramReservoir].
@@ -39,6 +40,7 @@ var _ Reservoir = &HistogramReservoir{}
// falls within a histogram bucket. The histogram bucket upper-boundaries are
// define by bounds.
type HistogramReservoir struct {
+ reservoir.ConcurrentSafe
*storage
// bounds are bucket bounds in ascending order.
@@ -57,14 +59,24 @@ type HistogramReservoir struct {
// parameters are the value and dropped (filtered) attributes of the
// measurement respectively.
func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) {
- var x float64
+ var n float64
switch v.Type() {
case Int64ValueType:
- x = float64(v.Int64())
+ n = float64(v.Int64())
case Float64ValueType:
- x = v.Float64()
+ n = v.Float64()
default:
panic("unknown value type")
}
- r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a)
+
+ idx := sort.SearchFloat64s(r.bounds, n)
+
+ r.store(ctx, idx, t, v, a)
+}
+
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *HistogramReservoir) Collect(dest *[]Exemplar) {
+ r.storage.Collect(dest)
}
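
The slot index comes from sort.SearchFloat64s, which returns the smallest index whose bound is greater than or equal to the value, so each exemplar lands in the slot for its histogram bucket and values above every bound land in the final slot. A tiny illustration of those semantics:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	bounds := []float64{0, 5, 10, 25} // bucket upper boundaries in ascending order
	fmt.Println(sort.SearchFloat64s(bounds, 7))  // 2: first bound >= 7 is 10
	fmt.Println(sort.SearchFloat64s(bounds, 25)) // 3: a value equal to a bound maps to that bound's slot
	fmt.Println(sort.SearchFloat64s(bounds, 30)) // 4: values above all bounds use the last slot
}
```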
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go
index 0e2e26df..79049602 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go
@@ -5,6 +5,7 @@ package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
import (
"context"
+ "sync"
"time"
"go.opentelemetry.io/otel/attribute"
@@ -13,78 +14,84 @@ import (
// storage is an exemplar storage for [Reservoir] implementations.
type storage struct {
- // store are the measurements sampled.
+ // measurements are the measurements sampled.
//
// This does not use []metricdata.Exemplar because it potentially would
// require an allocation for trace and span IDs in the hot path of Offer.
- store []measurement
+ measurements []measurement
}
func newStorage(n int) *storage {
- return &storage{store: make([]measurement, n)}
+ return &storage{measurements: make([]measurement, n)}
+}
+
+func (r *storage) store(ctx context.Context, idx int, ts time.Time, v Value, droppedAttr []attribute.KeyValue) {
+ r.measurements[idx].mux.Lock()
+ defer r.measurements[idx].mux.Unlock()
+ r.measurements[idx].FilteredAttributes = droppedAttr
+ r.measurements[idx].Time = ts
+ r.measurements[idx].Value = v
+ r.measurements[idx].Ctx = ctx
+ r.measurements[idx].valid = true
}
// Collect returns all the held exemplars.
//
// The Reservoir state is preserved after this call.
func (r *storage) Collect(dest *[]Exemplar) {
- *dest = reset(*dest, len(r.store), len(r.store))
+ *dest = reset(*dest, len(r.measurements), len(r.measurements))
var n int
- for _, m := range r.store {
- if !m.valid {
- continue
+ for i := range r.measurements {
+ if r.measurements[i].exemplar(&(*dest)[n]) {
+ n++
}
-
- m.exemplar(&(*dest)[n])
- n++
}
*dest = (*dest)[:n]
}
// measurement is a measurement made by a telemetry system.
type measurement struct {
+ mux sync.Mutex
// FilteredAttributes are the attributes dropped during the measurement.
FilteredAttributes []attribute.KeyValue
// Time is the time when the measurement was made.
Time time.Time
// Value is the value of the measurement.
Value Value
- // SpanContext is the SpanContext active when a measurement was made.
- SpanContext trace.SpanContext
+ // Ctx is the context active when a measurement was made.
+ Ctx context.Context
valid bool
}
-// newMeasurement returns a new non-empty Measurement.
-func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement {
- return measurement{
- FilteredAttributes: droppedAttr,
- Time: ts,
- Value: v,
- SpanContext: trace.SpanContextFromContext(ctx),
- valid: true,
+// exemplar returns m as an [Exemplar].
+// returns true if it populated the exemplar.
+func (m *measurement) exemplar(dest *Exemplar) bool {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+ if !m.valid {
+ return false
}
-}
-// exemplar returns m as an [Exemplar].
-func (m measurement) exemplar(dest *Exemplar) {
dest.FilteredAttributes = m.FilteredAttributes
dest.Time = m.Time
dest.Value = m.Value
- if m.SpanContext.HasTraceID() {
- traceID := m.SpanContext.TraceID()
+ sc := trace.SpanContextFromContext(m.Ctx)
+ if sc.HasTraceID() {
+ traceID := sc.TraceID()
dest.TraceID = traceID[:]
} else {
dest.TraceID = dest.TraceID[:0]
}
- if m.SpanContext.HasSpanID() {
- spanID := m.SpanContext.SpanID()
+ if sc.HasSpanID() {
+ spanID := sc.SpanID()
dest.SpanID = spanID[:]
} else {
dest.SpanID = dest.SpanID[:0]
}
+ return true
}
func reset[T any](s []T, length, capacity int) []T {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
index 63cccc50..b0805255 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
@@ -16,7 +16,6 @@ import (
"go.opentelemetry.io/otel/metric/embedded"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
- "go.opentelemetry.io/otel/sdk/metric/internal/x"
)
var zeroScope instrumentation.Scope
@@ -191,7 +190,6 @@ var (
_ metric.Int64UpDownCounter = (*int64Inst)(nil)
_ metric.Int64Histogram = (*int64Inst)(nil)
_ metric.Int64Gauge = (*int64Inst)(nil)
- _ x.EnabledInstrument = (*int64Inst)(nil)
)
func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
@@ -232,7 +230,6 @@ var (
_ metric.Float64UpDownCounter = (*float64Inst)(nil)
_ metric.Float64Histogram = (*float64Inst)(nil)
_ metric.Float64Gauge = (*float64Inst)(nil)
- _ x.EnabledInstrument = (*float64Inst)(nil)
)
func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
index 25ea6244..e0558cb6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
@@ -23,8 +23,9 @@ const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogr
var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112}
func (i InstrumentKind) String() string {
- if i >= InstrumentKind(len(_InstrumentKind_index)-1) {
+ idx := int(i) - 0
+ if i < 0 || idx >= len(_InstrumentKind_index)-1 {
return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
}
- return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
+ return _InstrumentKind_name[_InstrumentKind_index[idx]:_InstrumentKind_index[idx+1]]
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
index 0321da68..a1ae5573 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
@@ -74,12 +74,13 @@ func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] {
// LastValue returns a last-value aggregate function input and output.
func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
- lv := newLastValue[N](b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
- return b.filter(lv.measure), lv.delta
+ lv := newDeltaLastValue[N](b.AggregationLimit, b.resFunc())
+ return b.filter(lv.measure), lv.collect
default:
- return b.filter(lv.measure), lv.cumulative
+ lv := newCumulativeLastValue[N](b.AggregationLimit, b.resFunc())
+ return b.filter(lv.measure), lv.collect
}
}
@@ -110,12 +111,13 @@ func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregati
// Sum returns a sum aggregate function input and output.
func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
- s := newSum[N](monotonic, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
- return b.filter(s.measure), s.delta
+ s := newDeltaSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ return b.filter(s.measure), s.collect
default:
- return b.filter(s.measure), s.cumulative
+ s := newCumulativeSum[N](monotonic, b.AggregationLimit, b.resFunc())
+ return b.filter(s.measure), s.collect
}
}
@@ -125,12 +127,13 @@ func (b Builder[N]) ExplicitBucketHistogram(
boundaries []float64,
noMinMax, noSum bool,
) (Measure[N], ComputeAggregation) {
- h := newHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
switch b.Temporality {
case metricdata.DeltaTemporality:
- return b.filter(h.measure), h.delta
+ h := newDeltaHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ return b.filter(h.measure), h.collect
default:
- return b.filter(h.measure), h.cumulative
+ h := newCumulativeHistogram[N](boundaries, noMinMax, noSum, b.AggregationLimit, b.resFunc())
+ return b.filter(h.measure), h.collect
}
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go
new file mode 100644
index 00000000..eb69e965
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/atomic.go
@@ -0,0 +1,275 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
+
+import (
+ "math"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// atomicCounter is an efficient way of adding to a number which is either an
+// int64 or float64. It is designed to be efficient when adding whole
+// numbers, regardless of whether N is an int64 or float64.
+//
+// Inspired by the Prometheus counter implementation:
+// https://github.com/prometheus/client_golang/blob/14ccb93091c00f86b85af7753100aa372d63602b/prometheus/counter.go#L108
+type atomicCounter[N int64 | float64] struct {
+ // nFloatBits contains only the non-integer portion of the counter.
+ nFloatBits atomic.Uint64
+ // nInt contains only the integer portion of the counter.
+ nInt atomic.Int64
+}
+
+// load returns the current value. The caller must ensure all calls to add have
+// returned prior to calling load.
+func (n *atomicCounter[N]) load() N {
+ fval := math.Float64frombits(n.nFloatBits.Load())
+ ival := n.nInt.Load()
+ return N(fval + float64(ival))
+}
+
+func (n *atomicCounter[N]) add(value N) {
+ ival := int64(value)
+ // This case is where the value is an int, or if it is a whole-numbered float.
+ if float64(ival) == float64(value) {
+ n.nInt.Add(ival)
+ return
+ }
+
+ // Value must be a float below.
+ for {
+ oldBits := n.nFloatBits.Load()
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + float64(value))
+ if n.nFloatBits.CompareAndSwap(oldBits, newBits) {
+ return
+ }
+ }
+}
+
+// reset resets the internal state, and is not safe to call concurrently.
+func (n *atomicCounter[N]) reset() {
+ n.nFloatBits.Store(0)
+ n.nInt.Store(0)
+}
+
+// atomicN is a generic atomic number value.
+type atomicN[N int64 | float64] struct {
+ val atomic.Uint64
+}
+
+func (a *atomicN[N]) Load() (value N) {
+ v := a.val.Load()
+ switch any(value).(type) {
+ case int64:
+ value = N(v)
+ case float64:
+ value = N(math.Float64frombits(v))
+ default:
+ panic("unsupported type")
+ }
+ return value
+}
+
+func (a *atomicN[N]) Store(v N) {
+ var val uint64
+ switch any(v).(type) {
+ case int64:
+ val = uint64(v)
+ case float64:
+ val = math.Float64bits(float64(v))
+ default:
+ panic("unsupported type")
+ }
+ a.val.Store(val)
+}
+
+func (a *atomicN[N]) CompareAndSwap(oldN, newN N) bool {
+ var o, n uint64
+ switch any(oldN).(type) {
+ case int64:
+ o, n = uint64(oldN), uint64(newN)
+ case float64:
+ o, n = math.Float64bits(float64(oldN)), math.Float64bits(float64(newN))
+ default:
+ panic("unsupported type")
+ }
+ return a.val.CompareAndSwap(o, n)
+}
+
+type atomicMinMax[N int64 | float64] struct {
+ minimum, maximum atomicN[N]
+ set atomic.Bool
+ mu sync.Mutex
+}
+
+// init returns true if the value was used to initialize min and max.
+func (s *atomicMinMax[N]) init(val N) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.set.Load() {
+ defer s.set.Store(true)
+ s.minimum.Store(val)
+ s.maximum.Store(val)
+ return true
+ }
+ return false
+}
+
+func (s *atomicMinMax[N]) Update(val N) {
+ if !s.set.Load() && s.init(val) {
+ return
+ }
+
+ old := s.minimum.Load()
+ for val < old {
+ if s.minimum.CompareAndSwap(old, val) {
+ return
+ }
+ old = s.minimum.Load()
+ }
+
+ old = s.maximum.Load()
+ for old < val {
+ if s.maximum.CompareAndSwap(old, val) {
+ return
+ }
+ old = s.maximum.Load()
+ }
+}
+
+// hotColdWaitGroup is a synchronization primitive which enables lockless
+// writes for concurrent writers and enables a reader to acquire exclusive
+// access to a snapshot of state including only completed operations.
+// Conceptually, it can be thought of as a "hot" wait group,
+// and a "cold" wait group, with the ability for the reader to atomically swap
+// the hot and cold wait groups, and wait for the now-cold wait group to
+// complete.
+//
+// Inspired by the prometheus/client_golang histogram implementation:
+// https://github.com/prometheus/client_golang/blob/a974e0d45e0aa54c65492559114894314d8a2447/prometheus/histogram.go#L725
+//
+// Usage:
+//
+// var hcwg hotColdWaitGroup
+// var data [2]any
+//
+// func write() {
+// hotIdx := hcwg.start()
+// defer hcwg.done(hotIdx)
+// // modify data without locking
+// data[hotIdx].update()
+// }
+//
+// func read() {
+// coldIdx := hcwg.swapHotAndWait()
+// // read data now that all writes to the cold data have completed.
+// data[coldIdx].read()
+// }
+type hotColdWaitGroup struct {
+ // startedCountAndHotIdx contains a 63-bit counter in the lower bits,
+ // and a 1 bit hot index to denote which of the two data-points new
+ // measurements to write to. These are contained together so that read()
+ // can atomically swap the hot bit, reset the started writes to zero, and
+ // read the number writes that were started prior to the hot bit being
+ // swapped.
+ startedCountAndHotIdx atomic.Uint64
+ // endedCounts is the number of writes that have completed to each
+ // dataPoint.
+ endedCounts [2]atomic.Uint64
+}
+
+// start returns the hot index that the writer should write to. The returned
+// hot index is 0 or 1. The caller must call done(hot index) after it finishes
+// its operation. start() is safe to call concurrently with other methods.
+func (l *hotColdWaitGroup) start() uint64 {
+ // We increment h.startedCountAndHotIdx so that the counter in the lower
+ // 63 bits gets incremented. At the same time, we get the new value
+ // back, which we can use to return the currently-hot index.
+ return l.startedCountAndHotIdx.Add(1) >> 63
+}
+
+// done signals to the reader that an operation has fully completed.
+// done is safe to call concurrently.
+func (l *hotColdWaitGroup) done(hotIdx uint64) {
+ l.endedCounts[hotIdx].Add(1)
+}
+
+// swapHotAndWait swaps the hot bit, waits for all start() calls to be done(),
+// and then returns the now-cold index for the reader to read from. The
+// returned index is 0 or 1. swapHotAndWait must not be called concurrently.
+func (l *hotColdWaitGroup) swapHotAndWait() uint64 {
+ n := l.startedCountAndHotIdx.Load()
+ coldIdx := (^n) >> 63
+ // Swap the hot and cold index while resetting the started measurements
+ // count to zero.
+ n = l.startedCountAndHotIdx.Swap((coldIdx << 63))
+ hotIdx := n >> 63
+ startedCount := n & ((1 << 63) - 1)
+ // Wait for all measurements to the previously-hot map to finish.
+ for startedCount != l.endedCounts[hotIdx].Load() {
+ runtime.Gosched() // Let measurements complete.
+ }
+ // reset the number of ended operations
+ l.endedCounts[hotIdx].Store(0)
+ return hotIdx
+}
+
+// limitedSyncMap is a sync.Map which enforces the aggregation limit on
+// attribute sets and provides a Len() function.
+type limitedSyncMap struct {
+ sync.Map
+ aggLimit int
+ len int
+ lenMux sync.Mutex
+}
+
+func (m *limitedSyncMap) LoadOrStoreAttr(fltrAttr attribute.Set, newValue func(attribute.Set) any) any {
+ actual, loaded := m.Load(fltrAttr.Equivalent())
+ if loaded {
+ return actual
+ }
+ // If the overflow set exists, assume we have already overflowed and don't
+ // bother with the slow path below.
+ actual, loaded = m.Load(overflowSet.Equivalent())
+ if loaded {
+ return actual
+ }
+ // Slow path: add a new attribute set.
+ m.lenMux.Lock()
+ defer m.lenMux.Unlock()
+
+ // re-fetch now that we hold the lock to ensure we don't use the overflow
+ // set unless we are sure the attribute set isn't being written
+ // concurrently.
+ actual, loaded = m.Load(fltrAttr.Equivalent())
+ if loaded {
+ return actual
+ }
+
+ if m.aggLimit > 0 && m.len >= m.aggLimit-1 {
+ fltrAttr = overflowSet
+ }
+ actual, loaded = m.LoadOrStore(fltrAttr.Equivalent(), newValue(fltrAttr))
+ if !loaded {
+ m.len++
+ }
+ return actual
+}
+
+func (m *limitedSyncMap) Clear() {
+ m.lenMux.Lock()
+ defer m.lenMux.Unlock()
+ m.len = 0
+ m.Map.Clear()
+}
+
+func (m *limitedSyncMap) Len() int {
+ m.lenMux.Lock()
+ defer m.lenMux.Unlock()
+ return m.len
+}
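
The fractional path of atomicCounter relies on the standard compare-and-swap-on-bits loop for atomically adding to a float64, while whole numbers take the cheaper integer add. A minimal standalone sketch of that CAS pattern, written against the standard library only:

```go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// addFloat64 atomically adds delta to the float64 encoded in bits, using the
// same compare-and-swap loop as the fractional path of atomicCounter above.
func addFloat64(bits *atomic.Uint64, delta float64) {
	for {
		old := bits.Load()
		updated := math.Float64bits(math.Float64frombits(old) + delta)
		if bits.CompareAndSwap(old, updated) {
			return
		}
	}
}

func main() {
	var bits atomic.Uint64
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); addFloat64(&bits, 0.25) }()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(bits.Load())) // 25
}
```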
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
index 857eddf3..2aeba437 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -32,10 +32,9 @@ type expoHistogramDataPoint[N int64 | float64] struct {
attrs attribute.Set
res FilteredExemplarReservoir[N]
- count uint64
- min N
- max N
- sum N
+ min N
+ max N
+ sum N
maxSize int
noMinMax bool
@@ -74,8 +73,6 @@ func newExpoHistogramDataPoint[N int64 | float64](
// record adds a new measurement to the histogram. It will rescale the buckets if needed.
func (p *expoHistogramDataPoint[N]) record(v N) {
- p.count++
-
if !p.noMinMax {
if v < p.min {
p.min = v
@@ -193,6 +190,10 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int)
return count
}
+func (p *expoHistogramDataPoint[N]) count() uint64 {
+ return p.posBuckets.count() + p.negBuckets.count() + p.zeroCount
+}
+
// expoBuckets is a set of buckets in an exponential histogram.
type expoBuckets struct {
startBin int32
@@ -285,6 +286,14 @@ func (b *expoBuckets) downscale(delta int32) {
b.startBin >>= delta
}
+func (b *expoBuckets) count() uint64 {
+ var total uint64
+ for _, count := range b.counts {
+ total += count
+ }
+ return total
+}
+
// newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in.
@@ -301,7 +310,7 @@ func newExponentialHistogram[N int64 | float64](
maxScale: maxScale,
newRes: r,
- limit: newLimiter[*expoHistogramDataPoint[N]](limit),
+ limit: newLimiter[expoHistogramDataPoint[N]](limit),
values: make(map[attribute.Distinct]*expoHistogramDataPoint[N]),
start: now(),
@@ -317,7 +326,7 @@ type expoHistogram[N int64 | float64] struct {
maxScale int32
newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[*expoHistogramDataPoint[N]]
+ limit limiter[expoHistogramDataPoint[N]]
values map[attribute.Distinct]*expoHistogramDataPoint[N]
valuesMu sync.Mutex
@@ -338,13 +347,18 @@ func (e *expoHistogram[N]) measure(
e.valuesMu.Lock()
defer e.valuesMu.Unlock()
- attr := e.limit.Attributes(fltrAttr, e.values)
- v, ok := e.values[attr.Equivalent()]
+ v, ok := e.values[fltrAttr.Equivalent()]
if !ok {
- v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
- v.res = e.newRes(attr)
-
- e.values[attr.Equivalent()] = v
+ fltrAttr = e.limit.Attributes(fltrAttr, e.values)
+ // If we overflowed, make sure we add to the existing overflow series
+ // if it already exists.
+ v, ok = e.values[fltrAttr.Equivalent()]
+ if !ok {
+ v = newExpoHistogramDataPoint[N](fltrAttr, e.maxSize, e.maxScale, e.noMinMax, e.noSum)
+ v.res = e.newRes(fltrAttr)
+
+ e.values[fltrAttr.Equivalent()] = v
+ }
}
v.record(value)
v.res.Offer(ctx, value, droppedAttr)
@@ -371,7 +385,7 @@ func (e *expoHistogram[N]) delta(
hDPts[i].Attributes = val.attrs
hDPts[i].StartTime = e.start
hDPts[i].Time = t
- hDPts[i].Count = val.count
+ hDPts[i].Count = val.count()
hDPts[i].Scale = val.scale
hDPts[i].ZeroCount = val.zeroCount
hDPts[i].ZeroThreshold = 0.0
@@ -434,7 +448,7 @@ func (e *expoHistogram[N]) cumulative(
hDPts[i].Attributes = val.attrs
hDPts[i].StartTime = e.start
hDPts[i].Time = t
- hDPts[i].Count = val.count
+ hDPts[i].Count = val.count()
hDPts[i].Scale = val.scale
hDPts[i].ZeroCount = val.zeroCount
hDPts[i].ZeroThreshold = 0.0
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
index d4c41642..e4f9409b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go
@@ -5,10 +5,12 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg
import (
"context"
+ "sync"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/exemplar"
+ "go.opentelemetry.io/otel/sdk/metric/internal/reservoir"
)
// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter.
@@ -29,6 +31,11 @@ type FilteredExemplarReservoir[N int64 | float64] interface {
type filteredExemplarReservoir[N int64 | float64] struct {
filter exemplar.Filter
reservoir exemplar.Reservoir
+ // The exemplar.Reservoir is not required to be concurrent safe, but
+ // implementations can indicate that they are concurrent-safe by embedding
+ // reservoir.ConcurrentSafe in order to improve performance.
+ reservoirMux sync.Mutex
+ concurrentSafe bool
}
// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values
@@ -37,17 +44,30 @@ func NewFilteredExemplarReservoir[N int64 | float64](
f exemplar.Filter,
r exemplar.Reservoir,
) FilteredExemplarReservoir[N] {
+ _, concurrentSafe := r.(reservoir.ConcurrentSafe)
return &filteredExemplarReservoir[N]{
- filter: f,
- reservoir: r,
+ filter: f,
+ reservoir: r,
+ concurrentSafe: concurrentSafe,
}
}
func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) {
if f.filter(ctx) {
// only record the current time if we are sampling this measurement.
- f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr)
+ ts := time.Now()
+ if !f.concurrentSafe {
+ f.reservoirMux.Lock()
+ defer f.reservoirMux.Unlock()
+ }
+ f.reservoir.Offer(ctx, ts, exemplar.NewValue(val), attr)
}
}
-func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) }
+func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) {
+ if !f.concurrentSafe {
+ f.reservoirMux.Lock()
+ defer f.reservoirMux.Unlock()
+ }
+ f.reservoir.Collect(dest)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
index 736287e7..421325fb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
@@ -7,140 +7,169 @@ import (
"context"
"slices"
"sort"
- "sync"
+ "sync/atomic"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
-type buckets[N int64 | float64] struct {
+// histogramPoint is a single histogram point, used in delta aggregations.
+type histogramPoint[N int64 | float64] struct {
attrs attribute.Set
res FilteredExemplarReservoir[N]
-
- counts []uint64
- count uint64
- total N
- min, max N
+ histogramPointCounters[N]
}
-// newBuckets returns buckets with n bins.
-func newBuckets[N int64 | float64](attrs attribute.Set, n int) *buckets[N] {
- return &buckets[N]{attrs: attrs, counts: make([]uint64, n)}
+// hotColdHistogramPoint holds a hot and a cold histogram point, used in
+// cumulative aggregations.
+type hotColdHistogramPoint[N int64 | float64] struct {
+ hcwg hotColdWaitGroup
+ hotColdPoint [2]histogramPointCounters[N]
+
+ attrs attribute.Set
+ res FilteredExemplarReservoir[N]
}
-func (b *buckets[N]) sum(value N) { b.total += value }
+// histogramPointCounters contains only the atomic counter data, and is used by
+// both histogramPoint and hotColdHistogramPoint.
+type histogramPointCounters[N int64 | float64] struct {
+ counts []atomic.Uint64
+ total atomicCounter[N]
+ minMax atomicMinMax[N]
+}
-func (b *buckets[N]) bin(idx int, value N) {
- b.counts[idx]++
- b.count++
- if value < b.min {
- b.min = value
- } else if value > b.max {
- b.max = value
+func (b *histogramPointCounters[N]) loadCountsInto(into *[]uint64) uint64 {
+ // TODO (#3047): Making copies for counts incurs a large
+ // memory allocation footprint. Alternatives should be explored.
+ counts := reset(*into, len(b.counts), len(b.counts))
+ count := uint64(0)
+ for i := range b.counts {
+ c := b.counts[i].Load()
+ counts[i] = c
+ count += c
}
+ *into = counts
+ return count
}
-// histValues summarizes a set of measurements as an histValues with
-// explicitly defined buckets.
-type histValues[N int64 | float64] struct {
- noSum bool
- bounds []float64
+// mergeIntoAndReset merges this set of histogram counter data into another,
+// and resets the state of this set of counters. This is used by
+// hotColdHistogramPoint to ensure that the cumulative counters continue to
+// accumulate after being read.
+func (b *histogramPointCounters[N]) mergeIntoAndReset( // nolint:revive // Intentional internal control flag
+ into *histogramPointCounters[N],
+ noMinMax, noSum bool,
+) {
+ for i := range b.counts {
+ into.counts[i].Add(b.counts[i].Load())
+ b.counts[i].Store(0)
+ }
- newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[*buckets[N]]
- values map[attribute.Distinct]*buckets[N]
- valuesMu sync.Mutex
-}
+ if !noMinMax {
+ // Do not reset min or max because a cumulative min only ever decreases
+ // and a cumulative max only ever increases.
-func newHistValues[N int64 | float64](
- bounds []float64,
- noSum bool,
- limit int,
- r func(attribute.Set) FilteredExemplarReservoir[N],
-) *histValues[N] {
- // The responsibility of keeping all buckets correctly associated with the
- // passed boundaries is ultimately this type's responsibility. Make a copy
- // here so we can always guarantee this. Or, in the case of failure, have
- // complete control over the fix.
- b := slices.Clone(bounds)
- slices.Sort(b)
- return &histValues[N]{
- noSum: noSum,
- bounds: b,
- newRes: r,
- limit: newLimiter[*buckets[N]](limit),
- values: make(map[attribute.Distinct]*buckets[N]),
+ if b.minMax.set.Load() {
+ into.minMax.Update(b.minMax.minimum.Load())
+ into.minMax.Update(b.minMax.maximum.Load())
+ }
+ }
+ if !noSum {
+ into.total.add(b.total.load())
+ b.total.reset()
}
}
-// Aggregate records the measurement value, scoped by attr, and aggregates it
-// into a histogram.
-func (s *histValues[N]) measure(
+// deltaHistogram is a histogram whose internal storage is reset when it is
+// collected.
+//
+// deltaHistogram's measure is implemented without locking, even when called
+// concurrently with collect. This is done by maintaining two separate maps:
+// one "hot" which is concurrently updated by measure(), and one "cold", which
+// is read and reset by collect(). The [hotColdWaitGroup] allows collect() to
+// swap the hot and cold maps, and wait for updates to the cold map to complete
+// prior to reading. deltaHistogram swaps and clears complete maps so that
+// unused attribute sets do not report in subsequent collect() calls.
+type deltaHistogram[N int64 | float64] struct {
+ hcwg hotColdWaitGroup
+ hotColdValMap [2]limitedSyncMap
+
+ start time.Time
+ noMinMax bool
+ noSum bool
+ bounds []float64
+ newRes func(attribute.Set) FilteredExemplarReservoir[N]
+}
+
+func (s *deltaHistogram[N]) measure(
ctx context.Context,
value N,
fltrAttr attribute.Set,
droppedAttr []attribute.KeyValue,
) {
+ hotIdx := s.hcwg.start()
+ defer s.hcwg.done(hotIdx)
+ h := s.hotColdValMap[hotIdx].LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any {
+ hPt := &histogramPoint[N]{
+ res: s.newRes(attr),
+ attrs: attr,
+ // N+1 buckets. For example:
+ //
+ // bounds = [0, 5, 10]
+ //
+ // Then,
+ //
+ // counts = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
+ histogramPointCounters: histogramPointCounters[N]{counts: make([]atomic.Uint64, len(s.bounds)+1)},
+ }
+ return hPt
+ }).(*histogramPoint[N])
+
// This search will return an index in the range [0, len(s.bounds)], where
// it will return len(s.bounds) if value is greater than the last element
- // of s.bounds. This aligns with the buckets in that the length of buckets
+ // of s.bounds. This aligns with histogramPoint in that the length of its counts
// is len(s.bounds)+1, with the last bucket representing:
// (s.bounds[len(s.bounds)-1], +∞).
idx := sort.SearchFloat64s(s.bounds, float64(value))
-
- s.valuesMu.Lock()
- defer s.valuesMu.Unlock()
-
- attr := s.limit.Attributes(fltrAttr, s.values)
- b, ok := s.values[attr.Equivalent()]
- if !ok {
- // N+1 buckets. For example:
- //
- // bounds = [0, 5, 10]
- //
- // Then,
- //
- // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
- b = newBuckets[N](attr, len(s.bounds)+1)
- b.res = s.newRes(attr)
-
- // Ensure min and max are recorded values (not zero), for new buckets.
- b.min, b.max = value, value
- s.values[attr.Equivalent()] = b
+ h.counts[idx].Add(1)
+ if !s.noMinMax {
+ h.minMax.Update(value)
}
- b.bin(idx, value)
if !s.noSum {
- b.sum(value)
+ h.total.add(value)
}
- b.res.Offer(ctx, value, droppedAttr)
+ h.res.Offer(ctx, value, droppedAttr)
}
-// newHistogram returns an Aggregator that summarizes a set of measurements as
-// an histogram.
-func newHistogram[N int64 | float64](
+// newDeltaHistogram returns a histogram that is reset each time it is
+// collected.
+func newDeltaHistogram[N int64 | float64](
boundaries []float64,
noMinMax, noSum bool,
limit int,
r func(attribute.Set) FilteredExemplarReservoir[N],
-) *histogram[N] {
- return &histogram[N]{
- histValues: newHistValues[N](boundaries, noSum, limit, r),
- noMinMax: noMinMax,
- start: now(),
+) *deltaHistogram[N] {
+ // The responsibility of keeping all histogramPoints correctly associated with the
+ // passed boundaries is ultimately this type's responsibility. Make a copy
+ // here so we can always guarantee this. Or, in the case of failure, have
+ // complete control over the fix.
+ b := slices.Clone(boundaries)
+ slices.Sort(b)
+ return &deltaHistogram[N]{
+ start: now(),
+ noMinMax: noMinMax,
+ noSum: noSum,
+ bounds: b,
+ newRes: r,
+ hotColdValMap: [2]limitedSyncMap{
+ {aggLimit: limit},
+ {aggLimit: limit},
+ },
}
}
-// histogram summarizes a set of measurements as an histogram with explicitly
-// defined buckets.
-type histogram[N int64 | float64] struct {
- *histValues[N]
-
- noMinMax bool
- start time.Time
-}
-
-func (s *histogram[N]) delta(
+func (s *deltaHistogram[N]) collect(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
t := now()
@@ -150,39 +179,46 @@ func (s *histogram[N]) delta(
h, _ := (*dest).(metricdata.Histogram[N])
h.Temporality = metricdata.DeltaTemporality
- s.valuesMu.Lock()
- defer s.valuesMu.Unlock()
+ // delta always clears values on collection
+ readIdx := s.hcwg.swapHotAndWait()
// Do not allow modification of our copy of bounds.
bounds := slices.Clone(s.bounds)
- n := len(s.values)
+ // The len will not change while we iterate over values, since we waited
+ // for all writes to finish to the cold values and len.
+ n := s.hotColdValMap[readIdx].Len()
hDPts := reset(h.DataPoints, n, n)
var i int
- for _, val := range s.values {
+ s.hotColdValMap[readIdx].Range(func(_, value any) bool {
+ val := value.(*histogramPoint[N])
+
+ count := val.loadCountsInto(&hDPts[i].BucketCounts)
hDPts[i].Attributes = val.attrs
hDPts[i].StartTime = s.start
hDPts[i].Time = t
- hDPts[i].Count = val.count
+ hDPts[i].Count = count
hDPts[i].Bounds = bounds
- hDPts[i].BucketCounts = val.counts
if !s.noSum {
- hDPts[i].Sum = val.total
+ hDPts[i].Sum = val.total.load()
}
if !s.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
+ if val.minMax.set.Load() {
+ hDPts[i].Min = metricdata.NewExtrema(val.minMax.minimum.Load())
+ hDPts[i].Max = metricdata.NewExtrema(val.minMax.maximum.Load())
+ }
}
collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
i++
- }
+ return true
+ })
// Unused attribute sets do not report.
- clear(s.values)
+ s.hotColdValMap[readIdx].Clear()
// The delta collection cycle resets.
s.start = t
@@ -192,7 +228,101 @@ func (s *histogram[N]) delta(
return n
}
-func (s *histogram[N]) cumulative(
+// cumulativeHistogram summarizes a set of measurements as a histogram with
+// explicitly defined bucket boundaries.
+//
+// cumulativeHistogram's measure is implemented without locking, even when
+// called concurrently with collect. This is done by maintaining two separate
+// histogramPointCounters for each attribute set: one "hot" which is
+// concurrently updated by measure(), and one "cold", which is read and reset
+// by collect(). The [hotColdWaitGroup] allows collect() to swap the hot and
+// cold counters, and wait for updates to the cold counters to complete prior
+// to reading. Unlike deltaHistogram, this maintains a single map so that the
+// preserved attribute sets do not change when collect() is called.
+type cumulativeHistogram[N int64 | float64] struct {
+ values limitedSyncMap
+
+ start time.Time
+ noMinMax bool
+ noSum bool
+ bounds []float64
+ newRes func(attribute.Set) FilteredExemplarReservoir[N]
+}
+
+// newCumulativeHistogram returns a histogram that accumulates measurements
+// into a histogram data structure. It is never reset.
+func newCumulativeHistogram[N int64 | float64](
+ boundaries []float64,
+ noMinMax, noSum bool,
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *cumulativeHistogram[N] {
+ // The responsibility of keeping all histogramPoints correctly associated with the
+ // passed boundaries is ultimately this type's responsibility. Make a copy
+ // here so we can always guarantee this. Or, in the case of failure, have
+ // complete control over the fix.
+ b := slices.Clone(boundaries)
+ slices.Sort(b)
+ return &cumulativeHistogram[N]{
+ start: now(),
+ noMinMax: noMinMax,
+ noSum: noSum,
+ bounds: b,
+ newRes: r,
+ values: limitedSyncMap{aggLimit: limit},
+ }
+}
+
+func (s *cumulativeHistogram[N]) measure(
+ ctx context.Context,
+ value N,
+ fltrAttr attribute.Set,
+ droppedAttr []attribute.KeyValue,
+) {
+ h := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any {
+ hPt := &hotColdHistogramPoint[N]{
+ res: s.newRes(attr),
+ attrs: attr,
+ // N+1 buckets. For example:
+ //
+ // bounds = [0, 5, 10]
+ //
+ // Then,
+ //
+ // counts = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
+ hotColdPoint: [2]histogramPointCounters[N]{
+ {
+ counts: make([]atomic.Uint64, len(s.bounds)+1),
+ },
+ {
+ counts: make([]atomic.Uint64, len(s.bounds)+1),
+ },
+ },
+ }
+ return hPt
+ }).(*hotColdHistogramPoint[N])
+
+ // This search will return an index in the range [0, len(s.bounds)], where
+ // it will return len(s.bounds) if value is greater than the last element
+ // of s.bounds. This aligns with hotColdHistogramPoint in that the length of
+ // each counts slice is len(s.bounds)+1, with the last bucket representing:
+ // (s.bounds[len(s.bounds)-1], +∞).
+ idx := sort.SearchFloat64s(s.bounds, float64(value))
+
+ hotIdx := h.hcwg.start()
+ defer h.hcwg.done(hotIdx)
+
+ h.hotColdPoint[hotIdx].counts[idx].Add(1)
+ if !s.noMinMax {
+ h.hotColdPoint[hotIdx].minMax.Update(value)
+ }
+ if !s.noSum {
+ h.hotColdPoint[hotIdx].total.add(value)
+ }
+ h.res.Offer(ctx, value, droppedAttr)
+}
+
+func (s *cumulativeHistogram[N]) collect(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
t := now()
@@ -202,50 +332,58 @@ func (s *histogram[N]) cumulative(
h, _ := (*dest).(metricdata.Histogram[N])
h.Temporality = metricdata.CumulativeTemporality
- s.valuesMu.Lock()
- defer s.valuesMu.Unlock()
-
// Do not allow modification of our copy of bounds.
bounds := slices.Clone(s.bounds)
- n := len(s.values)
- hDPts := reset(h.DataPoints, n, n)
+ // Values are being concurrently written while we iterate, so only use the
+ // current length for capacity.
+ hDPts := reset(h.DataPoints, 0, s.values.Len())
var i int
- for _, val := range s.values {
- hDPts[i].Attributes = val.attrs
- hDPts[i].StartTime = s.start
- hDPts[i].Time = t
- hDPts[i].Count = val.count
- hDPts[i].Bounds = bounds
-
- // The HistogramDataPoint field values returned need to be copies of
- // the buckets value as we will keep updating them.
- //
- // TODO (#3047): Making copies for bounds and counts incurs a large
- // memory allocation footprint. Alternatives should be explored.
- hDPts[i].BucketCounts = slices.Clone(val.counts)
+ s.values.Range(func(_, value any) bool {
+ val := value.(*hotColdHistogramPoint[N])
+ // swap, observe, and clear the point
+ readIdx := val.hcwg.swapHotAndWait()
+ var bucketCounts []uint64
+ count := val.hotColdPoint[readIdx].loadCountsInto(&bucketCounts)
+ newPt := metricdata.HistogramDataPoint[N]{
+ Attributes: val.attrs,
+ StartTime: s.start,
+ Time: t,
+ Count: count,
+ Bounds: bounds,
+ // The HistogramDataPoint field values returned need to be copies of
+ // the histogramPoint value as we will keep updating them.
+ BucketCounts: bucketCounts,
+ }
if !s.noSum {
- hDPts[i].Sum = val.total
+ newPt.Sum = val.hotColdPoint[readIdx].total.load()
}
-
if !s.noMinMax {
- hDPts[i].Min = metricdata.NewExtrema(val.min)
- hDPts[i].Max = metricdata.NewExtrema(val.max)
+ if val.hotColdPoint[readIdx].minMax.set.Load() {
+ newPt.Min = metricdata.NewExtrema(val.hotColdPoint[readIdx].minMax.minimum.Load())
+ newPt.Max = metricdata.NewExtrema(val.hotColdPoint[readIdx].minMax.maximum.Load())
+ }
}
+ // Once we've read the point, merge it back into the hot histogram
+ // point since it is cumulative.
+ hotIdx := (readIdx + 1) % 2
+ val.hotColdPoint[readIdx].mergeIntoAndReset(&val.hotColdPoint[hotIdx], s.noMinMax, s.noSum)
- collectExemplars(&hDPts[i].Exemplars, val.res.Collect)
+ collectExemplars(&newPt.Exemplars, val.res.Collect)
+ hDPts = append(hDPts, newPt)
i++
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
// sets that become "stale" need to be forgotten so this will not
// overload the system.
- }
+ return true
+ })
h.DataPoints = hDPts
*dest = h
- return n
+ return i
}
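
The bucket-index math used by both measure implementations above can be checked with a quick, self-contained snippet. It is illustrative only; the bounds and values come from the comments above, everything else is hypothetical.

package main

import (
	"fmt"
	"sort"
)

// Demonstrates how sort.SearchFloat64s maps a value to a bucket index,
// matching the (-∞,0], (0,5], (5,10], (10,+∞) layout described above for
// bounds = [0, 5, 10].
func main() {
	bounds := []float64{0, 5, 10}
	for _, v := range []float64{-1, 0, 3, 5, 7, 12} {
		idx := sort.SearchFloat64s(bounds, v)
		fmt.Printf("value %v -> bucket %d\n", v, idx)
	}
	// value -1 -> bucket 0
	// value 0  -> bucket 0
	// value 3  -> bucket 1
	// value 5  -> bucket 1
	// value 7  -> bucket 2
	// value 12 -> bucket 3
}
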
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
index 4bbe624c..4924d732 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
@@ -5,115 +5,179 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg
import (
"context"
- "sync"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
-// datapoint is timestamped measurement data.
-type datapoint[N int64 | float64] struct {
+// lastValuePoint is timestamped measurement data.
+type lastValuePoint[N int64 | float64] struct {
attrs attribute.Set
- value N
+ value atomicN[N]
res FilteredExemplarReservoir[N]
}
-func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] {
- return &lastValue[N]{
+// lastValueMap summarizes a set of measurements as the last one made.
+type lastValueMap[N int64 | float64] struct {
+ newRes func(attribute.Set) FilteredExemplarReservoir[N]
+ values limitedSyncMap
+}
+
+func (s *lastValueMap[N]) measure(
+ ctx context.Context,
+ value N,
+ fltrAttr attribute.Set,
+ droppedAttr []attribute.KeyValue,
+) {
+ lv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any {
+ return &lastValuePoint[N]{
+ res: s.newRes(attr),
+ attrs: attr,
+ }
+ }).(*lastValuePoint[N])
+
+ lv.value.Store(value)
+ lv.res.Offer(ctx, value, droppedAttr)
+}
+
+func newDeltaLastValue[N int64 | float64](
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *deltaLastValue[N] {
+ return &deltaLastValue[N]{
newRes: r,
- limit: newLimiter[datapoint[N]](limit),
- values: make(map[attribute.Distinct]datapoint[N]),
start: now(),
+ hotColdValMap: [2]lastValueMap[N]{
+ {
+ values: limitedSyncMap{aggLimit: limit},
+ newRes: r,
+ },
+ {
+ values: limitedSyncMap{aggLimit: limit},
+ newRes: r,
+ },
+ },
}
}
-// lastValue summarizes a set of measurements as the last one made.
-type lastValue[N int64 | float64] struct {
- sync.Mutex
-
+// deltaLastValue summarizes a set of measurements as the last one made.
+type deltaLastValue[N int64 | float64] struct {
newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[datapoint[N]]
- values map[attribute.Distinct]datapoint[N]
start time.Time
-}
-func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
- s.Lock()
- defer s.Unlock()
-
- attr := s.limit.Attributes(fltrAttr, s.values)
- d, ok := s.values[attr.Equivalent()]
- if !ok {
- d.res = s.newRes(attr)
- }
-
- d.attrs = attr
- d.value = value
- d.res.Offer(ctx, value, droppedAttr)
+ hcwg hotColdWaitGroup
+ hotColdValMap [2]lastValueMap[N]
+}
- s.values[attr.Equivalent()] = d
+func (s *deltaLastValue[N]) measure(
+ ctx context.Context,
+ value N,
+ fltrAttr attribute.Set,
+ droppedAttr []attribute.KeyValue,
+) {
+ hotIdx := s.hcwg.start()
+ defer s.hcwg.done(hotIdx)
+ s.hotColdValMap[hotIdx].measure(ctx, value, fltrAttr, droppedAttr)
}
-func (s *lastValue[N]) delta(
+func (s *deltaLastValue[N]) collect(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
t := now()
+ n := s.copyAndClearDpts(dest, t)
+ // Update start time for delta temporality.
+ s.start = t
+ return n
+}
+
+// copyAndClearDpts copies the lastValuePoints held by s into dest. The number of lastValuePoints
+// copied is returned.
+func (s *deltaLastValue[N]) copyAndClearDpts(
+ dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
+ t time.Time,
+) int {
// Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
// the DataPoints is missed (better luck next time).
gData, _ := (*dest).(metricdata.Gauge[N])
+ // delta always clears values on collection
+ readIdx := s.hcwg.swapHotAndWait()
+ // The len will not change while we iterate over values, since we waited
+ // for all writes to finish to the cold values and len.
+ n := s.hotColdValMap[readIdx].values.Len()
+ dPts := reset(gData.DataPoints, n, n)
- s.Lock()
- defer s.Unlock()
-
- n := s.copyDpts(&gData.DataPoints, t)
+ var i int
+ s.hotColdValMap[readIdx].values.Range(func(_, value any) bool {
+ v := value.(*lastValuePoint[N])
+ dPts[i].Attributes = v.attrs
+ dPts[i].StartTime = s.start
+ dPts[i].Time = t
+ dPts[i].Value = v.value.Load()
+ collectExemplars[N](&dPts[i].Exemplars, v.res.Collect)
+ i++
+ return true
+ })
+ gData.DataPoints = dPts
// Do not report stale values.
- clear(s.values)
- // Update start time for delta temporality.
- s.start = t
-
+ s.hotColdValMap[readIdx].values.Clear()
*dest = gData
+ return i
+}
- return n
+// cumulativeLastValue summarizes a set of measurements as the last one made.
+type cumulativeLastValue[N int64 | float64] struct {
+ lastValueMap[N]
+ start time.Time
+}
+
+func newCumulativeLastValue[N int64 | float64](
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *cumulativeLastValue[N] {
+ return &cumulativeLastValue[N]{
+ lastValueMap: lastValueMap[N]{
+ values: limitedSyncMap{aggLimit: limit},
+ newRes: r,
+ },
+ start: now(),
+ }
}
-func (s *lastValue[N]) cumulative(
+func (s *cumulativeLastValue[N]) collect(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
t := now()
// Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
- // the DataPoints is missed (better luck next time).
+ // the lastValuePoints is missed (better luck next time).
gData, _ := (*dest).(metricdata.Gauge[N])
- s.Lock()
- defer s.Unlock()
+ // Values are being concurrently written while we iterate, so only use the
+ // current length for capacity.
+ dPts := reset(gData.DataPoints, 0, s.values.Len())
- n := s.copyDpts(&gData.DataPoints, t)
+ var i int
+ s.values.Range(func(_, value any) bool {
+ v := value.(*lastValuePoint[N])
+ newPt := metricdata.DataPoint[N]{
+ Attributes: v.attrs,
+ StartTime: s.start,
+ Time: t,
+ Value: v.value.Load(),
+ }
+ collectExemplars[N](&newPt.Exemplars, v.res.Collect)
+ dPts = append(dPts, newPt)
+ i++
+ return true
+ })
+ gData.DataPoints = dPts
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
// sets that become "stale" need to be forgotten so this will not
// overload the system.
*dest = gData
- return n
-}
-
-// copyDpts copies the datapoints held by s into dest. The number of datapoints
-// copied is returned.
-func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) int {
- n := len(s.values)
- *dest = reset(*dest, n, n)
-
- var i int
- for _, v := range s.values {
- (*dest)[i].Attributes = v.attrs
- (*dest)[i].StartTime = s.start
- (*dest)[i].Time = t
- (*dest)[i].Value = v.value
- collectExemplars(&(*dest)[i].Exemplars, v.res.Collect)
- i++
- }
- return n
+ return i
}
// newPrecomputedLastValue returns an aggregator that summarizes a set of
@@ -122,51 +186,23 @@ func newPrecomputedLastValue[N int64 | float64](
limit int,
r func(attribute.Set) FilteredExemplarReservoir[N],
) *precomputedLastValue[N] {
- return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)}
+ return &precomputedLastValue[N]{deltaLastValue: newDeltaLastValue[N](limit, r)}
}
// precomputedLastValue summarizes a set of observations as the last one made.
type precomputedLastValue[N int64 | float64] struct {
- *lastValue[N]
+ *deltaLastValue[N]
}
func (s *precomputedLastValue[N]) delta(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
- t := now()
- // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
- // the DataPoints is missed (better luck next time).
- gData, _ := (*dest).(metricdata.Gauge[N])
-
- s.Lock()
- defer s.Unlock()
-
- n := s.copyDpts(&gData.DataPoints, t)
- // Do not report stale values.
- clear(s.values)
- // Update start time for delta temporality.
- s.start = t
-
- *dest = gData
-
- return n
+ return s.collect(dest)
}
func (s *precomputedLastValue[N]) cumulative(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
- t := now()
- // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of
- // the DataPoints is missed (better luck next time).
- gData, _ := (*dest).(metricdata.Gauge[N])
-
- s.Lock()
- defer s.Unlock()
-
- n := s.copyDpts(&gData.DataPoints, t)
- // Do not report stale values.
- clear(s.values)
- *dest = gData
-
- return n
+ // Do not reset the start time.
+ return s.copyAndClearDpts(dest, now())
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
index 9ea0251e..c19a1aff 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/limit.go
@@ -30,7 +30,7 @@ func newLimiter[V any](aggregation int) limiter[V] {
// aggregation cardinality limit for the existing measurements. If it will,
// overflowSet is returned. Otherwise, if it will not exceed the limit, or the
// limit is not set (limit <= 0), attr is returned.
-func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]V) attribute.Set {
+func (l limiter[V]) Attributes(attrs attribute.Set, measurements map[attribute.Distinct]*V) attribute.Set {
if l.aggLimit > 0 {
_, exists := measurements[attrs.Equivalent()]
if !exists && len(measurements) >= l.aggLimit-1 {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
index 1b4b2304..66cb6808 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
@@ -5,7 +5,6 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg
import (
"context"
- "sync"
"time"
"go.opentelemetry.io/otel/attribute"
@@ -13,64 +12,75 @@ import (
)
type sumValue[N int64 | float64] struct {
- n N
+ n atomicCounter[N]
res FilteredExemplarReservoir[N]
attrs attribute.Set
}
-// valueMap is the storage for sums.
-type valueMap[N int64 | float64] struct {
- sync.Mutex
+type sumValueMap[N int64 | float64] struct {
+ values limitedSyncMap
newRes func(attribute.Set) FilteredExemplarReservoir[N]
- limit limiter[sumValue[N]]
- values map[attribute.Distinct]sumValue[N]
}
-func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] {
- return &valueMap[N]{
- newRes: r,
- limit: newLimiter[sumValue[N]](limit),
- values: make(map[attribute.Distinct]sumValue[N]),
- }
-}
-
-func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
- s.Lock()
- defer s.Unlock()
-
- attr := s.limit.Attributes(fltrAttr, s.values)
- v, ok := s.values[attr.Equivalent()]
- if !ok {
- v.res = s.newRes(attr)
- }
-
- v.attrs = attr
- v.n += value
- v.res.Offer(ctx, value, droppedAttr)
-
- s.values[attr.Equivalent()] = v
+func (s *sumValueMap[N]) measure(
+ ctx context.Context,
+ value N,
+ fltrAttr attribute.Set,
+ droppedAttr []attribute.KeyValue,
+) {
+ sv := s.values.LoadOrStoreAttr(fltrAttr, func(attr attribute.Set) any {
+ return &sumValue[N]{
+ res: s.newRes(attr),
+ attrs: attr,
+ }
+ }).(*sumValue[N])
+ sv.n.add(value)
+ // It is possible for collection to race with measurement and observe the
+ // exemplar in the batch of metrics after the add() for cumulative sums.
+ // This is an accepted tradeoff to avoid locking during measurement.
+ sv.res.Offer(ctx, value, droppedAttr)
}
-// newSum returns an aggregator that summarizes a set of measurements as their
-// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
-// the measurements were made in.
-func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] {
- return &sum[N]{
- valueMap: newValueMap[N](limit, r),
+// newDeltaSum returns an aggregator that summarizes a set of measurements as
+// their arithmetic sum. Each sum is scoped by attributes and the aggregation
+// cycle the measurements were made in.
+func newDeltaSum[N int64 | float64](
+ monotonic bool,
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *deltaSum[N] {
+ return &deltaSum[N]{
monotonic: monotonic,
start: now(),
+ hotColdValMap: [2]sumValueMap[N]{
+ {
+ values: limitedSyncMap{aggLimit: limit},
+ newRes: r,
+ },
+ {
+ values: limitedSyncMap{aggLimit: limit},
+ newRes: r,
+ },
+ },
}
}
-// sum summarizes a set of measurements made as their arithmetic sum.
-type sum[N int64 | float64] struct {
- *valueMap[N]
-
+// deltaSum is the storage for sums which is reset every collection interval.
+type deltaSum[N int64 | float64] struct {
monotonic bool
start time.Time
+
+ hcwg hotColdWaitGroup
+ hotColdValMap [2]sumValueMap[N]
+}
+
+func (s *deltaSum[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) {
+ hotIdx := s.hcwg.start()
+ defer s.hcwg.done(hotIdx)
+ s.hotColdValMap[hotIdx].measure(ctx, value, fltrAttr, droppedAttr)
}
-func (s *sum[N]) delta(
+func (s *deltaSum[N]) collect(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
t := now()
@@ -81,33 +91,61 @@ func (s *sum[N]) delta(
sData.Temporality = metricdata.DeltaTemporality
sData.IsMonotonic = s.monotonic
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
+ // delta always clears values on collection
+ readIdx := s.hcwg.swapHotAndWait()
+ // The len will not change while we iterate over values, since we waited
+ // for all writes to finish to the cold values and len.
+ n := s.hotColdValMap[readIdx].values.Len()
dPts := reset(sData.DataPoints, n, n)
var i int
- for _, val := range s.values {
+ s.hotColdValMap[readIdx].values.Range(func(_, value any) bool {
+ val := value.(*sumValue[N])
+ collectExemplars(&dPts[i].Exemplars, val.res.Collect)
dPts[i].Attributes = val.attrs
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = val.n
- collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+ dPts[i].Value = val.n.load()
i++
- }
- // Do not report stale values.
- clear(s.values)
+ return true
+ })
+ s.hotColdValMap[readIdx].values.Clear()
// The delta collection cycle resets.
s.start = t
sData.DataPoints = dPts
*dest = sData
- return n
+ return i
+}
+
+// newCumulativeSum returns an aggregator that summarizes a set of measurements
+// as their arithmetic sum. Each sum is scoped by attributes and the
+// aggregation cycle the measurements were made in.
+func newCumulativeSum[N int64 | float64](
+ monotonic bool,
+ limit int,
+ r func(attribute.Set) FilteredExemplarReservoir[N],
+) *cumulativeSum[N] {
+ return &cumulativeSum[N]{
+ monotonic: monotonic,
+ start: now(),
+ sumValueMap: sumValueMap[N]{
+ values: limitedSyncMap{aggLimit: limit},
+ newRes: r,
+ },
+ }
}
-func (s *sum[N]) cumulative(
+// cumulativeSum is the storage for sums which is never reset.
+type cumulativeSum[N int64 | float64] struct {
+ monotonic bool
+ start time.Time
+
+ sumValueMap[N]
+}
+
+func (s *cumulativeSum[N]) collect(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
t := now()
@@ -118,30 +156,33 @@ func (s *sum[N]) cumulative(
sData.Temporality = metricdata.CumulativeTemporality
sData.IsMonotonic = s.monotonic
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
- dPts := reset(sData.DataPoints, n, n)
+ // Values are being concurrently written while we iterate, so only use the
+ // current length for capacity.
+ dPts := reset(sData.DataPoints, 0, s.values.Len())
var i int
- for _, value := range s.values {
- dPts[i].Attributes = value.attrs
- dPts[i].StartTime = s.start
- dPts[i].Time = t
- dPts[i].Value = value.n
- collectExemplars(&dPts[i].Exemplars, value.res.Collect)
+ s.values.Range(func(_, value any) bool {
+ val := value.(*sumValue[N])
+ newPt := metricdata.DataPoint[N]{
+ Attributes: val.attrs,
+ StartTime: s.start,
+ Time: t,
+ Value: val.n.load(),
+ }
+ collectExemplars(&newPt.Exemplars, val.res.Collect)
+ dPts = append(dPts, newPt)
// TODO (#3006): This will use an unbounded amount of memory if there
// are unbounded number of attribute sets being aggregated. Attribute
// sets that become "stale" need to be forgotten so this will not
// overload the system.
i++
- }
+ return true
+ })
sData.DataPoints = dPts
*dest = sData
- return n
+ return i
}
// newPrecomputedSum returns an aggregator that summarizes a set of
@@ -153,27 +194,22 @@ func newPrecomputedSum[N int64 | float64](
r func(attribute.Set) FilteredExemplarReservoir[N],
) *precomputedSum[N] {
return &precomputedSum[N]{
- valueMap: newValueMap[N](limit, r),
- monotonic: monotonic,
- start: now(),
+ deltaSum: newDeltaSum(monotonic, limit, r),
}
}
// precomputedSum summarizes a set of observations as their arithmetic sum.
type precomputedSum[N int64 | float64] struct {
- *valueMap[N]
+ *deltaSum[N]
- monotonic bool
- start time.Time
-
- reported map[attribute.Distinct]N
+ reported map[any]N
}
func (s *precomputedSum[N]) delta(
dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface
) int {
t := now()
- newReported := make(map[attribute.Distinct]N)
+ newReported := make(map[any]N)
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
// use the zero-value sData and hope for better alignment next cycle.
@@ -181,27 +217,29 @@ func (s *precomputedSum[N]) delta(
sData.Temporality = metricdata.DeltaTemporality
sData.IsMonotonic = s.monotonic
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
+ // delta always clears values on collection
+ readIdx := s.hcwg.swapHotAndWait()
+ // The len will not change while we iterate over values, since we waited
+ // for all writes to finish to the cold values and len.
+ n := s.hotColdValMap[readIdx].values.Len()
dPts := reset(sData.DataPoints, n, n)
var i int
- for key, value := range s.values {
- delta := value.n - s.reported[key]
+ s.hotColdValMap[readIdx].values.Range(func(key, value any) bool {
+ val := value.(*sumValue[N])
+ n := val.n.load()
- dPts[i].Attributes = value.attrs
+ delta := n - s.reported[key]
+ collectExemplars(&dPts[i].Exemplars, val.res.Collect)
+ dPts[i].Attributes = val.attrs
dPts[i].StartTime = s.start
dPts[i].Time = t
dPts[i].Value = delta
- collectExemplars(&dPts[i].Exemplars, value.res.Collect)
-
- newReported[key] = value.n
+ newReported[key] = n
i++
- }
- // Unused attribute sets do not report.
- clear(s.values)
+ return true
+ })
+ s.hotColdValMap[readIdx].values.Clear()
s.reported = newReported
// The delta collection cycle resets.
s.start = t
@@ -209,7 +247,7 @@ func (s *precomputedSum[N]) delta(
sData.DataPoints = dPts
*dest = sData
- return n
+ return i
}
func (s *precomputedSum[N]) cumulative(
@@ -223,27 +261,28 @@ func (s *precomputedSum[N]) cumulative(
sData.Temporality = metricdata.CumulativeTemporality
sData.IsMonotonic = s.monotonic
- s.Lock()
- defer s.Unlock()
-
- n := len(s.values)
+ // cumulative precomputed always clears values on collection
+ readIdx := s.hcwg.swapHotAndWait()
+ // The len will not change while we iterate over values, since we waited
+ // for all writes to finish to the cold values and len.
+ n := s.hotColdValMap[readIdx].values.Len()
dPts := reset(sData.DataPoints, n, n)
var i int
- for _, val := range s.values {
+ s.hotColdValMap[readIdx].values.Range(func(_, value any) bool {
+ val := value.(*sumValue[N])
+ collectExemplars(&dPts[i].Exemplars, val.res.Collect)
dPts[i].Attributes = val.attrs
dPts[i].StartTime = s.start
dPts[i].Time = t
- dPts[i].Value = val.n
- collectExemplars(&dPts[i].Exemplars, val.res.Collect)
-
+ dPts[i].Value = val.n.load()
i++
- }
- // Unused attribute sets do not report.
- clear(s.values)
+ return true
+ })
+ s.hotColdValMap[readIdx].values.Clear()
sData.DataPoints = dPts
*dest = sData
- return n
+ return i
}
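
The bookkeeping that precomputedSum.delta performs with its reported map reduces to the following standalone sketch. The names and values are hypothetical; only the cumulative-to-delta subtraction mirrors the code above.

package main

import "fmt"

// observe mimics precomputedSum.delta for one attribute-set key: each cycle
// observes a cumulative total, reports the change since the previous
// collection, and remembers the new total in reported.
func main() {
	reported := map[string]float64{}
	observe := func(key string, cumulative float64) float64 {
		d := cumulative - reported[key]
		reported[key] = cumulative
		return d
	}
	fmt.Println(observe("attrs", 15)) // first cycle: 15 - 0 = 15
	fmt.Println(observe("attrs", 22)) // next cycle:  22 - 15 = 7
}
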
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go
new file mode 100644
index 00000000..66788c9e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/observ/instrumentation.go
@@ -0,0 +1,168 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package observ provides experimental observability instrumentation for the
+// metric reader.
+package observ // import "go.opentelemetry.io/otel/sdk/metric/internal/observ"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
+ "go.opentelemetry.io/otel/sdk/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
+ "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
+)
+
+const (
+ // ScopeName is the unique name of the meter used for instrumentation.
+ ScopeName = "go.opentelemetry.io/otel/sdk/metric/internal/observ"
+
+ // SchemaURL is the schema URL of the metrics produced by this
+ // instrumentation.
+ SchemaURL = semconv.SchemaURL
+)
+
+var (
+ measureAttrsPool = &sync.Pool{
+ New: func() any {
+ const n = 1 + // component.name
+ 1 + // component.type
+ 1 // error.type
+ s := make([]attribute.KeyValue, 0, n)
+ // Return a pointer to a slice instead of a slice itself
+ // to avoid allocations on every call.
+ return &s
+ },
+ }
+
+ recordOptPool = &sync.Pool{
+ New: func() any {
+ const n = 1 // WithAttributeSet
+ o := make([]metric.RecordOption, 0, n)
+ return &o
+ },
+ }
+)
+
+func get[T any](p *sync.Pool) *[]T { return p.Get().(*[]T) }
+
+func put[T any](p *sync.Pool, s *[]T) {
+ *s = (*s)[:0] // Reset.
+ p.Put(s)
+}
+
+// ComponentName returns the component name for the metric reader with the
+// provided ComponentType and ID.
+func ComponentName(componentType string, id int64) string {
+ return fmt.Sprintf("%s/%d", componentType, id)
+}
+
+// Instrumentation is experimental instrumentation for the metric reader.
+type Instrumentation struct {
+ colDuration metric.Float64Histogram
+
+ attrs []attribute.KeyValue
+ recOpt metric.RecordOption
+}
+
+// NewInstrumentation returns instrumentation for a metric reader with the
+// provided component type (such as the periodic or manual reader) and ID. It
+// uses the global
+// MeterProvider to create the instrumentation.
+//
+// The id should be the unique metric reader instance ID. It is used
+// to set the "component.name" attribute.
+//
+// If the experimental observability is disabled, nil is returned.
+func NewInstrumentation(componentType string, id int64) (*Instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ i := &Instrumentation{
+ attrs: []attribute.KeyValue{
+ semconv.OTelComponentName(ComponentName(componentType, id)),
+ semconv.OTelComponentTypeKey.String(componentType),
+ },
+ }
+
+ r := attribute.NewSet(i.attrs...)
+ i.recOpt = metric.WithAttributeSet(r)
+
+ meter := otel.GetMeterProvider().Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(SchemaURL),
+ )
+
+ colDuration, err := otelconv.NewSDKMetricReaderCollectionDuration(meter)
+ if err != nil {
+ err = fmt.Errorf("failed to create collection duration metric: %w", err)
+ }
+ i.colDuration = colDuration.Inst()
+
+ return i, err
+}
+
+// CollectMetrics instruments the collect method of a metric reader. It returns a
+// [CollectOp] that must have its [CollectOp.End] method called when the
+// collection ends.
+func (i *Instrumentation) CollectMetrics(ctx context.Context) CollectOp {
+ start := time.Now()
+
+ return CollectOp{
+ ctx: ctx,
+ start: start,
+ inst: i,
+ }
+}
+
+// CollectOp tracks the collect operation being observed by
+// [Instrumentation.CollectMetrics].
+type CollectOp struct {
+ ctx context.Context
+ start time.Time
+
+ inst *Instrumentation
+}
+
+// End completes the observation of the operation being observed by a call to
+// [Instrumentation.CollectMetrics].
+//
+// Any error that is encountered is provided as err.
+func (e CollectOp) End(err error) {
+ recOpt := get[metric.RecordOption](recordOptPool)
+ defer put(recordOptPool, recOpt)
+ *recOpt = append(*recOpt, e.inst.recordOption(err))
+
+ d := time.Since(e.start).Seconds()
+ e.inst.colDuration.Record(e.ctx, d, *recOpt...)
+}
+
+// recordOption returns a RecordOption with attributes representing the
+// outcome of the collection being recorded.
+//
+// If err is nil, the default recOpt of the Instrumentation is returned.
+//
+// Otherwise, a new RecordOption is returned with the base attributes of the
+// Instrumentation plus the error.type attribute set to the type of the error.
+func (i *Instrumentation) recordOption(err error) metric.RecordOption {
+ if err == nil {
+ return i.recOpt
+ }
+
+ attrs := get[attribute.KeyValue](measureAttrsPool)
+ defer put(measureAttrsPool, attrs)
+ *attrs = append(*attrs, i.attrs...)
+ *attrs = append(*attrs, semconv.ErrorType(err))
+
+ // Use WithAttributeSet instead of WithAttributes to avoid inefficiently
+ // copying attrs.
+ return metric.WithAttributeSet(attribute.NewSet(*attrs...))
+}
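
The intended call order for the new instrumentation is the one the readers below follow. As a standalone sketch (illustrative only: the helper name and produce callback are hypothetical, and observ is an internal package that only the SDK itself can import):

package metric // hypothetical helper inside the SDK's metric package

import (
	"context"

	"go.opentelemetry.io/otel/sdk/metric/internal/observ"
)

// collectWithObservability wraps a producer callback with the collection
// instrumentation: start a CollectOp before producing and End it with the
// resulting error so the duration and error.type attribute are recorded.
func collectWithObservability(
	ctx context.Context,
	inst *observ.Instrumentation,
	produce func(context.Context) error,
) (err error) {
	if inst != nil { // inst is nil when experimental observability is disabled
		op := inst.CollectMetrics(ctx)
		defer func() { op.End(err) }()
	}
	err = produce(ctx)
	return err
}
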
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go
new file mode 100644
index 00000000..3be234a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/concurrent_safe.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir"
+
+// ConcurrentSafe is an interface that can be embedded in an
+// exemplar.Reservoir to indicate to the SDK that it is safe to invoke its
+// methods concurrently. If this interface is not embedded, the SDK assumes it
+// is not safe to call concurrently and locks around Reservoir methods. This
+// is currently only used by the built-in reservoirs.
+type ConcurrentSafe interface{ concurrentSafe() }
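
Because the interface's only method is unexported, a reservoir opts in by embedding the marker rather than implementing it. A hypothetical built-in reservoir would look like the sketch below (illustrative only; lockFreeReservoir does not exist in this change):

package exemplar // hypothetical placement inside the SDK's exemplar package

import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir"

// lockFreeReservoir declares its Offer and Collect methods safe for
// concurrent use by embedding the marker interface. The SDK detects this with
// a type assertion (see NewFilteredExemplarReservoir) and skips its fallback
// mutex.
type lockFreeReservoir struct {
	reservoir.ConcurrentSafe
	// ...an exemplar.Reservoir implementation backed by atomics...
}
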
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go
new file mode 100644
index 00000000..6cd213b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/reservoir/doc.go
@@ -0,0 +1,6 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package reservoir contains experimental features used by built-in exemplar
+// reservoirs which require coordination with the metrics SDK.
+package reservoir // import "go.opentelemetry.io/otel/sdk/metric/internal/reservoir"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
deleted file mode 100644
index be0714a5..00000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# Experimental Features
-
-The Metric SDK contains features that have not yet stabilized in the OpenTelemetry specification.
-These features are added to the OpenTelemetry Go Metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
-
-These feature may change in backwards incompatible ways as feedback is applied.
-See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
-
-## Features
-
-- [Exemplars](#exemplars)
-- [Instrument Enabled](#instrument-enabled)
-
-### Exemplars
-
-A sample of measurements made may be exported directly as a set of exemplars.
-
-This experimental feature can be enabled by setting the `OTEL_GO_X_EXEMPLAR` environment variable.
-The value of must be the case-insensitive string of `"true"` to enable the feature.
-All other values are ignored.
-
-Exemplar filters are a supported.
-The exemplar filter applies to all measurements made.
-They filter these measurements, only allowing certain measurements to be passed to the underlying exemplar reservoir.
-
-To change the exemplar filter from the default `"trace_based"` filter set the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable.
-The value must be the case-sensitive string defined by the [OpenTelemetry specification].
-
-- `"always_on"`: allows all measurements
-- `"always_off"`: denies all measurements
-- `"trace_based"`: allows only sampled measurements
-
-All values other than these will result in the default, `"trace_based"`, exemplar filter being used.
-
-[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/a6ca2fd484c9e76fe1d8e1c79c99f08f4745b5ee/specification/configuration/sdk-environment-variables.md#exemplar
-
-#### Examples
-
-Enable exemplars to be exported.
-
-```console
-export OTEL_GO_X_EXEMPLAR=true
-```
-
-Disable exemplars from being exported.
-
-```console
-unset OTEL_GO_X_EXEMPLAR
-```
-
-Set the exemplar filter to allow all measurements.
-
-```console
-export OTEL_METRICS_EXEMPLAR_FILTER=always_on
-```
-
-Set the exemplar filter to deny all measurements.
-
-```console
-export OTEL_METRICS_EXEMPLAR_FILTER=always_off
-```
-
-Set the exemplar filter to only allow sampled measurements.
-
-```console
-export OTEL_METRICS_EXEMPLAR_FILTER=trace_based
-```
-
-Revert to the default exemplar filter (`"trace_based"`)
-
-```console
-unset OTEL_METRICS_EXEMPLAR_FILTER
-```
-
-### Instrument Enabled
-
-To help users avoid performing computationally expensive operations when recording measurements, synchronous instruments provide an `Enabled` method.
-
-#### Examples
-
-The following code shows an example of how to check if an instrument implements the `EnabledInstrument` interface before using the `Enabled` function to avoid doing an expensive computation:
-
-```go
-type enabledInstrument interface { Enabled(context.Context) bool }
-
-ctr, err := m.Int64Counter("expensive-counter")
-c, ok := ctr.(enabledInstrument)
-if !ok || c.Enabled(context.Background()) {
- c.Add(expensiveComputation())
-}
-```
-
-## Compatibility and Stability
-
-Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
-These features may be removed or modified in successive version releases, including patch versions.
-
-When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
-There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
-If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
deleted file mode 100644
index 294dcf84..00000000
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Package x contains support for OTel metric SDK experimental features.
-//
-// This package should only be used for features defined in the specification.
-// It should not be used for experiments or new project ideas.
-package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
-
-import (
- "context"
- "os"
-)
-
-// Feature is an experimental feature control flag. It provides a uniform way
-// to interact with these feature flags and parse their values.
-type Feature[T any] struct {
- key string
- parse func(v string) (T, bool)
-}
-
-//nolint:unused
-func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
- const envKeyRoot = "OTEL_GO_X_"
- return Feature[T]{
- key: envKeyRoot + suffix,
- parse: parse,
- }
-}
-
-// Key returns the environment variable key that needs to be set to enable the
-// feature.
-func (f Feature[T]) Key() string { return f.key }
-
-// Lookup returns the user configured value for the feature and true if the
-// user has enabled the feature. Otherwise, if the feature is not enabled, a
-// zero-value and false are returned.
-func (f Feature[T]) Lookup() (v T, ok bool) {
- // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
- //
- // > The SDK MUST interpret an empty value of an environment variable the
- // > same way as when the variable is unset.
- vRaw := os.Getenv(f.key)
- if vRaw == "" {
- return v, ok
- }
- return f.parse(vRaw)
-}
-
-// Enabled reports whether the feature is enabled.
-func (f Feature[T]) Enabled() bool {
- _, ok := f.Lookup()
- return ok
-}
-
-// EnabledInstrument informs whether the instrument is enabled.
-//
-// EnabledInstrument interface is implemented by synchronous instruments.
-type EnabledInstrument interface {
- // Enabled reports whether the instrument will process measurements for the given context.
- //
- // This function can be used in places where measuring an instrument
- // would result in computationally expensive operations.
- Enabled(context.Context) bool
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
index 85d3dc20..5b063020 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
@@ -10,10 +10,18 @@ import (
"sync"
"sync/atomic"
+ "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/internal/observ"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
+const (
+ // manualReaderType uniquely identifies the OpenTelemetry Metric Reader component
+ // being instrumented.
+ manualReaderType = "go.opentelemetry.io/otel/sdk/metric/metric.ManualReader"
+)
+
// ManualReader is a simple Reader that allows an application to
// read metrics on demand.
type ManualReader struct {
@@ -26,6 +34,8 @@ type ManualReader struct {
temporalitySelector TemporalitySelector
aggregationSelector AggregationSelector
+
+ inst *observ.Instrumentation
}
// Compile time check the manualReader implements Reader and is comparable.
@@ -39,9 +49,24 @@ func NewManualReader(opts ...ManualReaderOption) *ManualReader {
aggregationSelector: cfg.aggregationSelector,
}
r.externalProducers.Store(cfg.producers)
+
+ var err error
+ r.inst, err = observ.NewInstrumentation(manualReaderType, nextManualReaderID())
+ if err != nil {
+ otel.Handle(err)
+ }
+
return r
}
+var manualReaderIDCounter atomic.Int64
+
+// nextManualReaderID returns an identifier for this manual reader,
+// starting with 0 and incrementing by 1 each time it is called.
+func nextManualReaderID() int64 {
+ return manualReaderIDCounter.Add(1) - 1
+}
+
// register stores the sdkProducer which enables the caller
// to read metrics from the SDK on demand.
func (mr *ManualReader) register(p sdkProducer) {
@@ -93,12 +118,20 @@ func (mr *ManualReader) Shutdown(context.Context) error {
//
// This method is safe to call concurrently.
func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ var err error
+ if mr.inst != nil {
+ cp := mr.inst.CollectMetrics(ctx)
+ defer func() { cp.End(err) }()
+ }
+
if rm == nil {
- return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
+ err = errors.New("manual reader: *metricdata.ResourceMetrics is nil")
+ return err
}
p := mr.sdkProducer.Load()
if p == nil {
- return ErrReaderNotRegistered
+ err = ErrReaderNotRegistered
+ return err
}
ph, ok := p.(produceHolder)
@@ -107,11 +140,11 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr
// this should never happen. In the unforeseen case that this does
// happen, return an error instead of panicking so a user's code does
// not halt in the process.
- err := fmt.Errorf("manual reader: invalid producer: %T", p)
+ err = fmt.Errorf("manual reader: invalid producer: %T", p)
return err
}
- err := ph.produce(ctx, rm)
+ err = ph.produce(ctx, rm)
if err != nil {
return err
}
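
Note on the manual reader changes above: the instrumentation is additive and the public Collect contract is unchanged. A minimal sketch of on-demand collection using the standard sdk/metric APIs (the meter and instrument names are illustrative):

package main

import (
	"context"
	"log"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()

	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(ctx) }()

	// Record something so Collect has data to return.
	counter, err := provider.Meter("example").Int64Counter("requests")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(ctx, 1)

	// Collect on demand; passing a nil *ResourceMetrics is the error path
	// instrumented above.
	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		log.Fatal(err)
	}
	log.Printf("scopes collected: %d", len(rm.ScopeMetrics))
}
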
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
index 4da833cd..129cc643 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
@@ -18,8 +18,9 @@ const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTempora
var _Temporality_index = [...]uint8{0, 20, 41, 57}
func (i Temporality) String() string {
- if i >= Temporality(len(_Temporality_index)-1) {
+ idx := int(i) - 0
+ if i < 0 || idx >= len(_Temporality_index)-1 {
return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
}
- return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
+ return _Temporality_name[_Temporality_index[idx]:_Temporality_index[idx+1]]
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
index f08c771a..ef40ef29 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
@@ -13,7 +13,9 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/metric/internal/observ"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)
// Default periodic reader timing.
@@ -126,9 +128,26 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri
r.run(ctx, conf.interval)
}()
+ var err error
+ r.inst, err = observ.NewInstrumentation(
+ semconv.OTelComponentTypePeriodicMetricReader.Value.AsString(),
+ nextPeriodicReaderID(),
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+
return r
}
+var periodicReaderIDCounter atomic.Int64
+
+// nextPeriodicReaderID returns an identifier for this periodic reader,
+// starting with 0 and incrementing by 1 each time it is called.
+func nextPeriodicReaderID() int64 {
+ return periodicReaderIDCounter.Add(1) - 1
+}
+
// PeriodicReader is a Reader that continuously collects and exports metric
// data at a set interval.
type PeriodicReader struct {
@@ -148,6 +167,8 @@ type PeriodicReader struct {
shutdownOnce sync.Once
rmPool sync.Pool
+
+ inst *observ.Instrumentation
}
// Compile time check the periodicReader implements Reader and is comparable.
@@ -235,8 +256,15 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet
// collect unwraps p as a produceHolder and returns its produce results.
func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error {
+ var err error
+ if r.inst != nil {
+ cp := r.inst.CollectMetrics(ctx)
+ defer func() { cp.End(err) }()
+ }
+
if p == nil {
- return ErrReaderNotRegistered
+ err = ErrReaderNotRegistered
+ return err
}
ph, ok := p.(produceHolder)
@@ -245,11 +273,11 @@ func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.Reso
// this should never happen. In the unforeseen case that this does
// happen, return an error instead of panicking so a user's code does
// not halt in the process.
- err := fmt.Errorf("periodic reader: invalid producer: %T", p)
+ err = fmt.Errorf("periodic reader: invalid producer: %T", p)
return err
}
- err := ph.produce(ctx, rm)
+ err = ph.produce(ctx, rm)
if err != nil {
return err
}
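
The periodic reader gets the same optional instrumentation; construction is unchanged for callers. A short wiring sketch (exporter left abstract, interval value illustrative):

package metricsetup

import (
	"time"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// newPeriodicProvider collects and exports on a fixed interval. The
// collect-path metrics added above are emitted only when the observability
// feature gate is enabled.
func newPeriodicProvider(exp sdkmetric.Exporter) *sdkmetric.MeterProvider {
	reader := sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(30*time.Second))
	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
}
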
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
index 408fddc8..ab269cdf 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
@@ -513,7 +513,10 @@ func (i *inserter[N]) aggregateFunc(
case AggregationExplicitBucketHistogram:
var noSum bool
switch kind {
- case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+ case InstrumentKindUpDownCounter,
+ InstrumentKindObservableUpDownCounter,
+ InstrumentKindObservableGauge,
+ InstrumentKindGauge:
// The sum should not be collected for any instrument that can make
// negative measurements:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
@@ -523,7 +526,10 @@ func (i *inserter[N]) aggregateFunc(
case AggregationBase2ExponentialHistogram:
var noSum bool
switch kind {
- case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge:
+ case InstrumentKindUpDownCounter,
+ InstrumentKindObservableUpDownCounter,
+ InstrumentKindObservableGauge,
+ InstrumentKindGauge:
// The sum should not be collected for any instrument that can make
// negative measurements:
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
@@ -569,7 +575,11 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
}
case AggregationSum:
switch kind {
- case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
+ case InstrumentKindObservableCounter,
+ InstrumentKindObservableUpDownCounter,
+ InstrumentKindCounter,
+ InstrumentKindHistogram,
+ InstrumentKindUpDownCounter:
return nil
default:
// TODO: review need for aggregation check after
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
index 5c1cea82..7b205c73 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
@@ -127,10 +127,40 @@ type TemporalitySelector func(InstrumentKind) metricdata.Temporality
// DefaultTemporalitySelector is the default TemporalitySelector used if
// WithTemporalitySelector is not provided. CumulativeTemporality will be used
// for all instrument kinds if this TemporalitySelector is used.
-func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
+func DefaultTemporalitySelector(k InstrumentKind) metricdata.Temporality {
+ return CumulativeTemporalitySelector(k)
+}
+
+// CumulativeTemporalitySelector is the TemporalitySelector that uses
+// a cumulative temporality for all instrument kinds.
+func CumulativeTemporalitySelector(InstrumentKind) metricdata.Temporality {
return metricdata.CumulativeTemporality
}
+// DeltaTemporalitySelector is the TemporalitySelector that uses delta
+// temporality for the counter, histogram, and observable counter instrument
+// kinds. All other instruments use cumulative temporality.
+func DeltaTemporalitySelector(k InstrumentKind) metricdata.Temporality {
+ switch k {
+ case InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindObservableCounter:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
+// LowMemoryTemporalitySelector is the TemporalitySelector that uses
+// delta temporality for counters and histograms. All other instruments use
+// cumulative temporality.
+func LowMemoryTemporalitySelector(k InstrumentKind) metricdata.Temporality {
+ switch k {
+ case InstrumentKindCounter, InstrumentKindHistogram:
+ return metricdata.DeltaTemporality
+ default:
+ return metricdata.CumulativeTemporality
+ }
+}
+
// AggregationSelector selects the aggregation and the parameters to use for
// that aggregation based on the InstrumentKind.
//
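
The three exported selectors above plug directly into a reader. A sketch using a manual reader, assuming the existing WithTemporalitySelector reader option:

package metricsetup

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// newDeltaReader reports delta temporality for counters, histograms, and
// observable counters, and cumulative temporality for everything else.
func newDeltaReader() *sdkmetric.ManualReader {
	return sdkmetric.NewManualReader(
		sdkmetric.WithTemporalitySelector(sdkmetric.DeltaTemporalitySelector),
	)
}
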
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
index dd9051a7..ea9e076c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go
@@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
// version is the current release version of the metric SDK in use.
func version() string {
- return "1.38.0"
+ return "1.40.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index 3f20eb7a..8a7bb330 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -13,7 +13,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)
type (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index bbe142d2..a19b39de 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -11,7 +11,7 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)
type containerIDProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index 4a1b017e..c4915722 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)
const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index 5fed33d4..023621ba 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -8,7 +8,7 @@ import (
"errors"
"strings"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)
type hostIDProvider func() (string, error)
@@ -51,17 +51,16 @@ type hostIDReaderDarwin struct {
execCommand commandExecutor
}
-// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
+// read executes `/usr/sbin/ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
// from the IOPlatformUUID line. If the command fails or the uuid cannot be
// parsed an error will be returned.
func (r *hostIDReaderDarwin) read() (string, error) {
- result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
+ result, err := r.execCommand("/usr/sbin/ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
if err != nil {
return "", err
}
- lines := strings.Split(result, "\n")
- for _, line := range lines {
+ for line := range strings.SplitSeq(result, "\n") {
if strings.Contains(line, "IOPlatformUUID") {
parts := strings.Split(line, " = ")
if len(parts) == 2 {
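
The strings.SplitSeq call above is the Go 1.24 iterator form of strings.Split: it yields each substring lazily instead of allocating the whole slice. A standalone sketch of the same parsing loop over fake ioreg output:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const out = "\"IOPlatformSerialNumber\" = \"X\"\n\"IOPlatformUUID\" = \"ABC-123\"\n"

	// Iterate lines without building the []string that strings.Split returns.
	for line := range strings.SplitSeq(out, "\n") {
		if strings.Contains(line, "IOPlatformUUID") {
			parts := strings.Split(line, " = ")
			if len(parts) == 2 {
				fmt.Println(strings.Trim(parts[1], "\""))
			}
		}
	}
}
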
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
index cc8b8938..4c1c30f2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build dragonfly || freebsd || netbsd || openbsd || solaris
-// +build dragonfly freebsd netbsd openbsd solaris
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
index f84f1732..4a26096c 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build linux
-// +build linux
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
index df12c44c..63ad2fa4 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
-// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
index 3677c83d..2b8ca20b 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build windows
-// +build windows
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 51da76e8..534809e2 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -8,7 +8,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)
type osDescriptionProvider func() (string, error)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
index 7252af79..a1763267 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
index a6ff26a4..6c50ab68 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
index a77742b0..25f62953 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
-// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index 138e5772..a1189553 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -11,7 +11,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)
type (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
index 9bc3e525..7d15cbb9 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -6,20 +6,14 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"errors"
- "fmt"
"sync"
"sync/atomic"
"time"
"go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/sdk"
- "go.opentelemetry.io/otel/sdk/internal/env"
- "go.opentelemetry.io/otel/sdk/trace/internal/x"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
- "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
+ "go.opentelemetry.io/otel/sdk/trace/internal/env"
+ "go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace"
)
@@ -33,8 +27,6 @@ const (
DefaultMaxExportBatchSize = 512
)
-var queueFull = otelconv.ErrorTypeAttr("queue_full")
-
// BatchSpanProcessorOption configures a BatchSpanProcessor.
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
@@ -78,10 +70,7 @@ type batchSpanProcessor struct {
queue chan ReadOnlySpan
dropped uint32
- selfObservabilityEnabled bool
- callbackRegistration metric.Registration
- spansProcessedCounter otelconv.SDKProcessorSpanProcessed
- componentNameAttr attribute.KeyValue
+ inst *observ.BSP
batch []ReadOnlySpan
batchMutex sync.Mutex
@@ -124,19 +113,14 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
stopCh: make(chan struct{}),
}
- if x.SelfObservability.Enabled() {
- bsp.selfObservabilityEnabled = true
- bsp.componentNameAttr = componentName()
-
- var err error
- bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs(
- bsp.componentNameAttr,
- func() int64 { return int64(len(bsp.queue)) },
- int64(bsp.o.MaxQueueSize),
- )
- if err != nil {
- otel.Handle(err)
- }
+ var err error
+ bsp.inst, err = observ.NewBSP(
+ nextProcessorID(),
+ func() int64 { return int64(len(bsp.queue)) },
+ int64(bsp.o.MaxQueueSize),
+ )
+ if err != nil {
+ otel.Handle(err)
}
bsp.stopWait.Add(1)
@@ -157,51 +141,6 @@ func nextProcessorID() int64 {
return processorIDCounter.Add(1) - 1
}
-func componentName() attribute.KeyValue {
- id := nextProcessorID()
- name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id)
- return semconv.OTelComponentName(name)
-}
-
-// newBSPObs creates and returns a new set of metrics instruments and a
-// registration for a BatchSpanProcessor. It is the caller's responsibility
-// to unregister the registration when it is no longer needed.
-func newBSPObs(
- cmpnt attribute.KeyValue,
- qLen func() int64,
- qMax int64,
-) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) {
- meter := otel.GetMeterProvider().Meter(
- selfObsScopeName,
- metric.WithInstrumentationVersion(sdk.Version()),
- metric.WithSchemaURL(semconv.SchemaURL),
- )
-
- qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
-
- qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
- err = errors.Join(err, e)
-
- spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
- err = errors.Join(err, e)
-
- cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
- attrs := metric.WithAttributes(cmpnt, cmpntT)
-
- reg, e := meter.RegisterCallback(
- func(_ context.Context, o metric.Observer) error {
- o.ObserveInt64(qSize.Inst(), qLen(), attrs)
- o.ObserveInt64(qCap.Inst(), qMax, attrs)
- return nil
- },
- qSize.Inst(),
- qCap.Inst(),
- )
- err = errors.Join(err, e)
-
- return spansProcessed, reg, err
-}
-
// OnStart method does nothing.
func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
@@ -242,8 +181,8 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
case <-ctx.Done():
err = ctx.Err()
}
- if bsp.selfObservabilityEnabled {
- err = errors.Join(err, bsp.callbackRegistration.Unregister())
+ if bsp.inst != nil {
+ err = errors.Join(err, bsp.inst.Shutdown())
}
})
return err
@@ -357,10 +296,8 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
if l := len(bsp.batch); l > 0 {
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
- if bsp.selfObservabilityEnabled {
- bsp.spansProcessedCounter.Add(ctx, int64(l),
- bsp.componentNameAttr,
- bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor))
+ if bsp.inst != nil {
+ bsp.inst.Processed(ctx, int64(l))
}
err := bsp.e.ExportSpans(ctx, bsp.batch)
@@ -470,11 +407,8 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
case bsp.queue <- sd:
return true
case <-ctx.Done():
- if bsp.selfObservabilityEnabled {
- bsp.spansProcessedCounter.Add(ctx, 1,
- bsp.componentNameAttr,
- bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
- bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ if bsp.inst != nil {
+ bsp.inst.ProcessedQueueFull(ctx, 1)
}
return false
}
@@ -490,11 +424,8 @@ func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan)
return true
default:
atomic.AddUint32(&bsp.dropped, 1)
- if bsp.selfObservabilityEnabled {
- bsp.spansProcessedCounter.Add(ctx, 1,
- bsp.componentNameAttr,
- bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
- bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ if bsp.inst != nil {
+ bsp.inst.ProcessedQueueFull(ctx, 1)
}
}
return false
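
From the caller's side, nothing about batch span processor setup changes; the queue and processed-span metrics are recorded internally when instrumentation is available. A typical wiring sketch (queue size value illustrative):

package tracesetup

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// newBatchingProvider registers a batching span processor for the exporter.
func newBatchingProvider(exp sdktrace.SpanExporter) *sdktrace.TracerProvider {
	bsp := sdktrace.NewBatchSpanProcessor(exp, sdktrace.WithMaxQueueSize(4096))
	return sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
}
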
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
index e58e7f6e..b502c7d4 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -7,7 +7,7 @@ Package trace contains support for OpenTelemetry distributed tracing.
The following assumes a basic familiarity with OpenTelemetry concepts.
See https://opentelemetry.io.
-See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about
+See [go.opentelemetry.io/otel/sdk/internal/x] for information about
the experimental features.
*/
package trace // import "go.opentelemetry.io/otel/sdk/trace"
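
All of the observ instrumentation introduced in this change sits behind that experimental-features mechanism, so nothing is recorded unless the gate is enabled before providers are constructed. The variable name in the sketch below is inferred from the x package's OTEL_GO_X_ prefix and the Observability feature name; treat it as an assumption and confirm it against the package linked above:

package main

import (
	"context"
	"os"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Assumed variable name; verify in go.opentelemetry.io/otel/sdk/internal/x.
	// The gate is read when processors and tracers are constructed, so set it
	// before building the provider.
	os.Setenv("OTEL_GO_X_OBSERVABILITY", "true")

	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()
	_ = tp.Tracer("example")
}
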
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go
similarity index 98%
rename from vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
rename to vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go
index e3309231..58f68df4 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/env/env.go
@@ -3,7 +3,7 @@
// Package env provides types and functionality for environment variable support
// in the OpenTelemetry SDK.
-package env // import "go.opentelemetry.io/otel/sdk/internal/env"
+package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env"
import (
"os"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go
new file mode 100644
index 00000000..d9cfba0b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/batch_span_processor.go
@@ -0,0 +1,119 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
+ "go.opentelemetry.io/otel/sdk/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
+ "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
+)
+
+const (
+ // ScopeName is the name of the instrumentation scope.
+ ScopeName = "go.opentelemetry.io/otel/sdk/trace/internal/observ"
+
+ // SchemaURL is the schema URL of the instrumentation.
+ SchemaURL = semconv.SchemaURL
+)
+
+// ErrQueueFull is the attribute value for the "queue_full" error type.
+var ErrQueueFull = otelconv.SDKProcessorSpanProcessed{}.AttrErrorType(
+ otelconv.ErrorTypeAttr("queue_full"),
+)
+
+// BSPComponentName returns the component name attribute for a
+// BatchSpanProcessor with the given ID.
+func BSPComponentName(id int64) attribute.KeyValue {
+ t := otelconv.ComponentTypeBatchingSpanProcessor
+ name := fmt.Sprintf("%s/%d", t, id)
+ return semconv.OTelComponentName(name)
+}
+
+// BSP is the instrumentation for an OTel SDK BatchSpanProcessor.
+type BSP struct {
+ reg metric.Registration
+
+ processed metric.Int64Counter
+ processedOpts []metric.AddOption
+ processedQueueFullOpts []metric.AddOption
+}
+
+func NewBSP(id int64, qLen func() int64, qMax int64) (*BSP, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ meter := otel.GetMeterProvider().Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(SchemaURL),
+ )
+
+ qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
+ if err != nil {
+ err = fmt.Errorf("failed to create BSP queue capacity metric: %w", err)
+ }
+ qCapInst := qCap.Inst()
+
+ qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
+ if e != nil {
+ e := fmt.Errorf("failed to create BSP queue size metric: %w", e)
+ err = errors.Join(err, e)
+ }
+ qSizeInst := qSize.Inst()
+
+ cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
+ cmpnt := BSPComponentName(id)
+ set := attribute.NewSet(cmpnt, cmpntT)
+
+ obsOpts := []metric.ObserveOption{metric.WithAttributeSet(set)}
+ reg, e := meter.RegisterCallback(
+ func(_ context.Context, o metric.Observer) error {
+ o.ObserveInt64(qSizeInst, qLen(), obsOpts...)
+ o.ObserveInt64(qCapInst, qMax, obsOpts...)
+ return nil
+ },
+ qSizeInst,
+ qCapInst,
+ )
+ if e != nil {
+ e := fmt.Errorf("failed to register BSP queue size/capacity callback: %w", e)
+ err = errors.Join(err, e)
+ }
+
+ processed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
+ if e != nil {
+ e := fmt.Errorf("failed to create BSP processed spans metric: %w", e)
+ err = errors.Join(err, e)
+ }
+ processedOpts := []metric.AddOption{metric.WithAttributeSet(set)}
+
+ set = attribute.NewSet(cmpnt, cmpntT, ErrQueueFull)
+ processedQueueFullOpts := []metric.AddOption{metric.WithAttributeSet(set)}
+
+ return &BSP{
+ reg: reg,
+ processed: processed.Inst(),
+ processedOpts: processedOpts,
+ processedQueueFullOpts: processedQueueFullOpts,
+ }, err
+}
+
+func (b *BSP) Shutdown() error { return b.reg.Unregister() }
+
+func (b *BSP) Processed(ctx context.Context, n int64) {
+ b.processed.Add(ctx, n, b.processedOpts...)
+}
+
+func (b *BSP) ProcessedQueueFull(ctx context.Context, n int64) {
+ b.processed.Add(ctx, n, b.processedQueueFullOpts...)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go
new file mode 100644
index 00000000..b542121e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/doc.go
@@ -0,0 +1,6 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package observ provides observability instrumentation for the OTel trace SDK
+// package.
+package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go
new file mode 100644
index 00000000..8afd0526
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/simple_span_processor.go
@@ -0,0 +1,97 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
+ "go.opentelemetry.io/otel/sdk/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
+ "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
+)
+
+var measureAttrsPool = sync.Pool{
+ New: func() any {
+ // "component.name" + "component.type" + "error.type"
+ const n = 1 + 1 + 1
+ s := make([]attribute.KeyValue, 0, n)
+ // Return a pointer to a slice instead of a slice itself
+ // to avoid allocations on every call.
+ return &s
+ },
+}
+
+// SSP is the instrumentation for an OTel SDK SimpleSpanProcessor.
+type SSP struct {
+ spansProcessedCounter metric.Int64Counter
+ addOpts []metric.AddOption
+ attrs []attribute.KeyValue
+}
+
+// SSPComponentName returns the component name attribute for a
+// SimpleSpanProcessor with the given ID.
+func SSPComponentName(id int64) attribute.KeyValue {
+ t := otelconv.ComponentTypeSimpleSpanProcessor
+ name := fmt.Sprintf("%s/%d", t, id)
+ return semconv.OTelComponentName(name)
+}
+
+// NewSSP returns instrumentation for an OTel SDK SimpleSpanProcessor with the
+// provided ID.
+//
+// If the experimental observability is disabled, nil is returned.
+func NewSSP(id int64) (*SSP, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ meter := otel.GetMeterProvider().Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(SchemaURL),
+ )
+ spansProcessedCounter, err := otelconv.NewSDKProcessorSpanProcessed(meter)
+ if err != nil {
+ err = fmt.Errorf("failed to create SSP processed spans metric: %w", err)
+ }
+
+ componentName := SSPComponentName(id)
+ componentType := spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeSimpleSpanProcessor)
+ attrs := []attribute.KeyValue{componentName, componentType}
+ addOpts := []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(attrs...))}
+
+ return &SSP{
+ spansProcessedCounter: spansProcessedCounter.Inst(),
+ addOpts: addOpts,
+ attrs: attrs,
+ }, err
+}
+
+// SpanProcessed records that a span has been processed by the SimpleSpanProcessor.
+// If err is non-nil, it records the processing error as an attribute.
+func (ssp *SSP) SpanProcessed(ctx context.Context, err error) {
+ ssp.spansProcessedCounter.Add(ctx, 1, ssp.addOption(err)...)
+}
+
+func (ssp *SSP) addOption(err error) []metric.AddOption {
+ if err == nil {
+ return ssp.addOpts
+ }
+ attrs := measureAttrsPool.Get().(*[]attribute.KeyValue)
+ defer func() {
+ *attrs = (*attrs)[:0] // reset the slice for reuse
+ measureAttrsPool.Put(attrs)
+ }()
+ *attrs = append(*attrs, ssp.attrs...)
+ *attrs = append(*attrs, semconv.ErrorType(err))
+ // Use WithAttributeSet rather than WithAttributes so the attrs slice is
+ // not inefficiently copied.
+ return []metric.AddOption{metric.WithAttributeSet(attribute.NewSet(*attrs...))}
+}
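
The pool above stores *[]attribute.KeyValue rather than the slice itself so that Get and Put do not allocate when the value is boxed into an interface. The same pattern in isolation:

package main

import (
	"fmt"
	"sync"
)

// Pool slice pointers, not slices: a slice header stored directly in an
// interface escapes and allocates on every Put.
var bufPool = sync.Pool{
	New: func() any {
		s := make([]int, 0, 8)
		return &s
	},
}

func main() {
	buf := bufPool.Get().(*[]int)
	defer func() {
		*buf = (*buf)[:0] // reset length, keep capacity, for the next user
		bufPool.Put(buf)
	}()

	*buf = append(*buf, 1, 2, 3)
	fmt.Println(len(*buf), cap(*buf))
}
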
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go
new file mode 100644
index 00000000..13a2db29
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/observ/tracer.go
@@ -0,0 +1,223 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package observ // import "go.opentelemetry.io/otel/sdk/trace/internal/observ"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
+ "go.opentelemetry.io/otel/sdk/internal/x"
+ "go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
+ "go.opentelemetry.io/otel/trace"
+)
+
+var meterOpts = []metric.MeterOption{
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(SchemaURL),
+}
+
+// Tracer is instrumentation for an OTel SDK Tracer.
+type Tracer struct {
+ enabled bool
+
+ live metric.Int64UpDownCounter
+ started metric.Int64Counter
+}
+
+func NewTracer() (Tracer, error) {
+ if !x.Observability.Enabled() {
+ return Tracer{}, nil
+ }
+ meter := otel.GetMeterProvider().Meter(ScopeName, meterOpts...)
+
+ var err error
+ l, e := otelconv.NewSDKSpanLive(meter)
+ if e != nil {
+ e = fmt.Errorf("failed to create span live metric: %w", e)
+ err = errors.Join(err, e)
+ }
+
+ s, e := otelconv.NewSDKSpanStarted(meter)
+ if e != nil {
+ e = fmt.Errorf("failed to create span started metric: %w", e)
+ err = errors.Join(err, e)
+ }
+
+ return Tracer{enabled: true, live: l.Inst(), started: s.Inst()}, err
+}
+
+func (t Tracer) Enabled() bool { return t.enabled }
+
+func (t Tracer) SpanStarted(ctx context.Context, psc trace.SpanContext, span trace.Span) {
+ key := spanStartedKey{
+ parent: parentStateNoParent,
+ sampling: samplingStateDrop,
+ }
+
+ if psc.IsValid() {
+ if psc.IsRemote() {
+ key.parent = parentStateRemoteParent
+ } else {
+ key.parent = parentStateLocalParent
+ }
+ }
+
+ if span.IsRecording() {
+ if span.SpanContext().IsSampled() {
+ key.sampling = samplingStateRecordAndSample
+ } else {
+ key.sampling = samplingStateRecordOnly
+ }
+ }
+
+ opts := spanStartedOpts[key]
+ t.started.Add(ctx, 1, opts...)
+}
+
+func (t Tracer) SpanLive(ctx context.Context, span trace.Span) {
+ t.spanLive(ctx, 1, span)
+}
+
+func (t Tracer) SpanEnded(ctx context.Context, span trace.Span) {
+ t.spanLive(ctx, -1, span)
+}
+
+func (t Tracer) spanLive(ctx context.Context, value int64, span trace.Span) {
+ key := spanLiveKey{sampled: span.SpanContext().IsSampled()}
+ opts := spanLiveOpts[key]
+ t.live.Add(ctx, value, opts...)
+}
+
+type parentState int
+
+const (
+ parentStateNoParent parentState = iota
+ parentStateLocalParent
+ parentStateRemoteParent
+)
+
+type samplingState int
+
+const (
+ samplingStateDrop samplingState = iota
+ samplingStateRecordOnly
+ samplingStateRecordAndSample
+)
+
+type spanStartedKey struct {
+ parent parentState
+ sampling samplingState
+}
+
+var spanStartedOpts = map[spanStartedKey][]metric.AddOption{
+ {
+ parentStateNoParent,
+ samplingStateDrop,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ )),
+ },
+ {
+ parentStateLocalParent,
+ samplingStateDrop,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ )),
+ },
+ {
+ parentStateRemoteParent,
+ samplingStateDrop,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ )),
+ },
+
+ {
+ parentStateNoParent,
+ samplingStateRecordOnly,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ )),
+ },
+ {
+ parentStateLocalParent,
+ samplingStateRecordOnly,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ )),
+ },
+ {
+ parentStateRemoteParent,
+ samplingStateRecordOnly,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ )),
+ },
+
+ {
+ parentStateNoParent,
+ samplingStateRecordAndSample,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ )),
+ },
+ {
+ parentStateLocalParent,
+ samplingStateRecordAndSample,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ )),
+ },
+ {
+ parentStateRemoteParent,
+ samplingStateRecordAndSample,
+ }: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ )),
+ },
+}
+
+type spanLiveKey struct {
+ sampled bool
+}
+
+var spanLiveOpts = map[spanLiveKey][]metric.AddOption{
+ {true}: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordAndSample,
+ ),
+ )),
+ },
+ {false}: {
+ metric.WithAttributeSet(attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordOnly,
+ ),
+ )),
+ },
+}
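
The spanStartedOpts and spanLiveOpts tables trade a little fixed memory for allocation-free Add calls: every attribute combination is built once at package initialization and looked up by a comparable key struct on the hot path. A self-contained sketch of the pattern with a hypothetical attribute (not the otelconv ones used above):

package attrcache

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

type key struct{ sampled bool }

// opts is computed once; record never builds an attribute.Set on the hot path.
var opts = map[key][]metric.AddOption{
	{sampled: true}:  {metric.WithAttributeSet(attribute.NewSet(attribute.Bool("sampled", true)))},
	{sampled: false}: {metric.WithAttributeSet(attribute.NewSet(attribute.Bool("sampled", false)))},
}

func record(ctx context.Context, c metric.Int64Counter, sampled bool) {
	c.Add(ctx, 1, opts[key{sampled: sampled}]...)
}
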
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 37ce2ac8..d2cf4ebd 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -5,29 +5,21 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
- "errors"
"fmt"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
- "go.opentelemetry.io/otel/sdk/trace/internal/x"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
- "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
+ "go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/noop"
)
-const (
- defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
- selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace"
-)
+const defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
// tracerProviderConfig.
type tracerProviderConfig struct {
@@ -163,19 +155,16 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
t, ok := p.namedTracer[is]
if !ok {
t = &tracer{
- provider: p,
- instrumentationScope: is,
- selfObservabilityEnabled: x.SelfObservability.Enabled(),
+ provider: p,
+ instrumentationScope: is,
}
- if t.selfObservabilityEnabled {
- var err error
- t.spanLiveMetric, t.spanStartedMetric, err = newInst()
- if err != nil {
- msg := "failed to create self-observability metrics for tracer: %w"
- err := fmt.Errorf(msg, err)
- otel.Handle(err)
- }
+
+ var err error
+ t.inst, err = observ.NewTracer()
+ if err != nil {
+ otel.Handle(err)
}
+
p.namedTracer[is] = t
}
return t, ok
@@ -201,23 +190,6 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return t
}
-func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) {
- m := otel.GetMeterProvider().Meter(
- selfObsScopeName,
- metric.WithInstrumentationVersion(sdk.Version()),
- metric.WithSchemaURL(semconv.SchemaURL),
- )
-
- var err error
- spanLiveMetric, e := otelconv.NewSDKSpanLive(m)
- err = errors.Join(err, e)
-
- spanStartedMetric, e := otelconv.NewSDKSpanStarted(m)
- err = errors.Join(err, e)
-
- return spanLiveMetric, spanStartedMetric, err
-}
-
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
// This check prevents calls during a shutdown.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index 689663d4..81c5060a 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -280,3 +280,31 @@ func (pb parentBased) Description() string {
pb.config.localParentNotSampled.Description(),
)
}
+
+// AlwaysRecord returns a sampler decorator which ensures that every span
+// is passed to the SpanProcessor, even those that would be normally dropped.
+// It converts `Drop` decisions from the root sampler into `RecordOnly` decisions,
+// allowing processors to see all spans without sending them to exporters. This is
+// typically used to enable accurate span-to-metrics processing.
+func AlwaysRecord(root Sampler) Sampler {
+ return alwaysRecord{root}
+}
+
+type alwaysRecord struct {
+ root Sampler
+}
+
+func (ar alwaysRecord) ShouldSample(p SamplingParameters) SamplingResult {
+ rootSamplerSamplingResult := ar.root.ShouldSample(p)
+ if rootSamplerSamplingResult.Decision == Drop {
+ return SamplingResult{
+ Decision: RecordOnly,
+ Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+ }
+ }
+ return rootSamplerSamplingResult
+}
+
+func (ar alwaysRecord) Description() string {
+ return "AlwaysRecord{root:" + ar.root.Description() + "}"
+}
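
AlwaysRecord composes with any root sampler. For example, to export roughly 10% of traces while still letting span processors (such as span-to-metrics pipelines) see every span, wrap the usual parent-based ratio sampler:

package tracesetup

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// newSamplingProvider samples ~10% of root traces for export, but records
// all spans so processors observe every one of them.
func newSamplingProvider(exp sdktrace.SpanExporter) *sdktrace.TracerProvider {
	sampler := sdktrace.AlwaysRecord(
		sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.10)),
	)
	return sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sampler),
		sdktrace.WithBatcher(exp),
	)
}
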
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
index 411d9ccd..771e427a 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -6,9 +6,12 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"sync"
+ "sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/trace/internal/observ"
+ "go.opentelemetry.io/otel/trace"
)
// simpleSpanProcessor is a SpanProcessor that synchronously sends all
@@ -17,6 +20,8 @@ type simpleSpanProcessor struct {
exporterMu sync.Mutex
exporter SpanExporter
stopOnce sync.Once
+
+ inst *observ.SSP
}
var _ SpanProcessor = (*simpleSpanProcessor)(nil)
@@ -33,11 +38,26 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
ssp := &simpleSpanProcessor{
exporter: exporter,
}
+
+ var err error
+ ssp.inst, err = observ.NewSSP(nextSimpleProcessorID())
+ if err != nil {
+ otel.Handle(err)
+ }
+
global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
return ssp
}
+var simpleProcessorIDCounter atomic.Int64
+
+// nextSimpleProcessorID returns an identifier for this simple span processor,
+// starting with 0 and incrementing by 1 each time it is called.
+func nextSimpleProcessorID() int64 {
+ return simpleProcessorIDCounter.Add(1) - 1
+}
+
// OnStart does nothing.
func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
@@ -46,11 +66,20 @@ func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
ssp.exporterMu.Lock()
defer ssp.exporterMu.Unlock()
+ var err error
if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
- if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+ err = ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s})
+ if err != nil {
otel.Handle(err)
}
}
+
+ if ssp.inst != nil {
+ // Add the span to the context to ensure the metric is recorded
+ // with the correct span context.
+ ctx := trace.ContextWithSpanContext(context.Background(), s.SpanContext())
+ ssp.inst.SpanProcessed(ctx, err)
+ }
}
// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
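
Caller-side setup for the simple processor is likewise unchanged; as the warning above notes, it exports synchronously on every span end and is mainly useful in tests. A minimal sketch:

package tracesetup

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// newSyncProvider exports each sampled span as soon as it ends.
func newSyncProvider(exp sdktrace.SpanExporter) *sdktrace.TracerProvider {
	return sdktrace.NewTracerProvider(
		sdktrace.WithSpanProcessor(sdktrace.NewSimpleSpanProcessor(exp)),
	)
}
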
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index b376051f..d4666105 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -151,6 +151,12 @@ type recordingSpan struct {
// tracer is the SDK tracer that created this span.
tracer *tracer
+
+ // origCtx is the context used when starting this span that has the
+ // recordingSpan instance set as the active span. If not nil, it is used
+ // when ending the span to ensure any metrics are recorded with a context
+ // containing this span without requiring an additional allocation.
+ origCtx context.Context
}
var (
@@ -158,6 +164,10 @@ var (
_ runtimeTracer = (*recordingSpan)(nil)
)
+func (s *recordingSpan) setOrigCtx(ctx context.Context) {
+ s.origCtx = ctx
+}
+
// SpanContext returns the SpanContext of this span.
func (s *recordingSpan) SpanContext() trace.SpanContext {
if s == nil {
@@ -496,14 +506,15 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
s.mu.Unlock()
- if s.tracer.selfObservabilityEnabled {
- defer func() {
- // Add the span to the context to ensure the metric is recorded
- // with the correct span context.
- ctx := trace.ContextWithSpan(context.Background(), s)
- set := spanLiveSet(s.spanContext.IsSampled())
- s.tracer.spanLiveMetric.AddSet(ctx, -1, set)
- }()
+ if s.tracer.inst.Enabled() {
+ ctx := s.origCtx
+ if ctx == nil {
+ // This should not happen as the origCtx should be set, but
+ // ensure trace information is propagated in the case of an
+ // error.
+ ctx = trace.ContextWithSpan(context.Background(), s)
+ }
+ defer s.tracer.inst.SpanEnded(ctx, s)
}
sps := s.tracer.provider.getSpanProcessors()
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
index bec5e209..321d9743 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -3,7 +3,7 @@
package trace // import "go.opentelemetry.io/otel/sdk/trace"
-import "go.opentelemetry.io/otel/sdk/internal/env"
+import "go.opentelemetry.io/otel/sdk/trace/internal/env"
const (
// DefaultAttributeValueLengthLimit is the default maximum allowed
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
index e965c4cc..e1d08fd4 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -7,9 +7,8 @@ import (
"context"
"time"
- "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/instrumentation"
- "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
+ "go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -20,9 +19,7 @@ type tracer struct {
provider *TracerProvider
instrumentationScope instrumentation.Scope
- selfObservabilityEnabled bool
- spanLiveMetric otelconv.SDKSpanLive
- spanStartedMetric otelconv.SDKSpanStarted
+ inst observ.Tracer
}
var _ trace.Tracer = &tracer{}
@@ -53,10 +50,17 @@ func (tr *tracer) Start(
s := tr.newSpan(ctx, name, &config)
newCtx := trace.ContextWithSpan(ctx, s)
- if tr.selfObservabilityEnabled {
+ if tr.inst.Enabled() {
+ if o, ok := s.(interface{ setOrigCtx(context.Context) }); ok {
+ // If this is a recording span, store the original context.
+ // This allows later retrieval of baggage and other information
+ // that may have been stored in the context at span start time, and it
+ // avoids the allocation of repeatedly calling trace.ContextWithSpan.
+ o.setOrigCtx(newCtx)
+ }
psc := trace.SpanContextFromContext(ctx)
- set := spanStartedSet(psc, s)
- tr.spanStartedMetric.AddSet(newCtx, 1, set)
+ tr.inst.SpanStarted(newCtx, psc, s)
}
if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
@@ -168,12 +172,11 @@ func (tr *tracer) newRecordingSpan(
s.SetAttributes(sr.Attributes...)
s.SetAttributes(config.Attributes()...)
- if tr.selfObservabilityEnabled {
+ if tr.inst.Enabled() {
// Propagate any existing values from the context with the new span to
// the measurement context.
ctx = trace.ContextWithSpan(ctx, s)
- set := spanLiveSet(s.spanContext.IsSampled())
- tr.spanLiveMetric.AddSet(ctx, 1, set)
+ tr.inst.SpanLive(ctx, s)
}
return s
@@ -183,112 +186,3 @@ func (tr *tracer) newRecordingSpan(
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
return nonRecordingSpan{tracer: tr, sc: sc}
}
-
-type parentState int
-
-const (
- parentStateNoParent parentState = iota
- parentStateLocalParent
- parentStateRemoteParent
-)
-
-type samplingState int
-
-const (
- samplingStateDrop samplingState = iota
- samplingStateRecordOnly
- samplingStateRecordAndSample
-)
-
-type spanStartedSetKey struct {
- parent parentState
- sampling samplingState
-}
-
-var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{
- {parentStateNoParent, samplingStateDrop}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
- ),
- {parentStateLocalParent, samplingStateDrop}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
- ),
- {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
- ),
-
- {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
- ),
- {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
- ),
- {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
- ),
-
- {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
- ),
- {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
- ),
- {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet(
- otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
- otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
- ),
-}
-
-func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set {
- key := spanStartedSetKey{
- parent: parentStateNoParent,
- sampling: samplingStateDrop,
- }
-
- if psc.IsValid() {
- if psc.IsRemote() {
- key.parent = parentStateRemoteParent
- } else {
- key.parent = parentStateLocalParent
- }
- }
-
- if span.IsRecording() {
- if span.SpanContext().IsSampled() {
- key.sampling = samplingStateRecordAndSample
- } else {
- key.sampling = samplingStateRecordOnly
- }
- }
-
- return spanStartedSetCache[key]
-}
-
-type spanLiveSetKey struct {
- sampled bool
-}
-
-var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
- {true}: attribute.NewSet(
- otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
- otelconv.SpanSamplingResultRecordAndSample,
- ),
- ),
- {false}: attribute.NewSet(
- otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
- otelconv.SpanSamplingResultRecordOnly,
- ),
- ),
-}
-
-func spanLiveSet(sampled bool) attribute.Set {
- key := spanLiveSetKey{sampled: sampled}
- return spanLiveSetCache[key]
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index 7f97cc31..b5497c28 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.38.0"
+ return "1.40.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
deleted file mode 100644
index 2de1fc3c..00000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.26.0
-
-[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
deleted file mode 100644
index d8dc822b..00000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
+++ /dev/null
@@ -1,8996 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The Android platform on which the Android application is running.
-const (
- // AndroidOSAPILevelKey is the attribute Key conforming to the
- // "android.os.api_level" semantic conventions. It represents the uniquely
- // identifies the framework API revision offered by a version
- // (`os.version`) of the android operating system. More information can be
- // found
- // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '33', '32'
- AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
- return AndroidOSAPILevelKey.String(val)
-}
-
-// ASP.NET Core attributes
-const (
- // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.result" semantic conventions. It represents
- // the rate-limiting result, shows whether the lease was acquired or
- // contains a rejection reason
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'acquired', 'request_canceled'
- AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
- // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
- // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
- // represents the full type name of the
- // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
- // implementation that handled the exception.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if and only if the exception
- // was handled by this handler.)
- // Stability: stable
- // Examples: 'Contoso.MyHandler'
- AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
- // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
- // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
- // It represents the aSP.NET Core exception middleware handling result
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'handled', 'unhandled'
- AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
-
- // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
- // the rate limiting policy name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fixed', 'sliding', 'token'
- AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
- // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
- // "aspnetcore.request.is_unhandled" semantic conventions. It represents
- // the flag indicating whether the request was handled by the application
- // pipeline.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
- // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
- // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
- // value that indicates whether the matched route is a fallback route.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-
- // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
- // "aspnetcore.routing.match_status" semantic conventions. It represents
- // the match result - success or failure
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'success', 'failure'
- AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
-)
-
-var (
- // Lease was acquired
- AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
- // Lease request was rejected by the endpoint limiter
- AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
- // Lease request was rejected by the global limiter
- AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
- // Lease request was canceled
- AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-var (
- // Exception was handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
- // Exception was not handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
- // Exception handling was skipped because the response had started
- AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
- // Exception handling didn't run because the request was aborted
- AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
-)
-
-var (
- // Match succeeded
- AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
- // Match failed
- AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
- return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
- return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating whether the request was handled by the application
-// pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
- return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
- return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
-
-// Generic attributes for AWS services.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes for AWS DynamoDB.
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the number of
-// items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
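For orientation while reviewing this removal: the DynamoDB helpers above are thin constructors over attribute.KeyValue, and instrumentation typically attaches them to a client span. A minimal, illustrative sketch (not part of the vendored file; assumes an already-started span from go.opentelemetry.io/otel/trace):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBQuery records common request/response values on a span
// using the v1.26.0 helpers deleted in this diff.
func annotateDynamoDBQuery(span trace.Span, table string, limit, count int) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames(table), // "aws.dynamodb.table_names"
		semconv.AWSDynamoDBLimit(limit),      // "aws.dynamodb.limit"
		semconv.AWSDynamoDBCount(count),      // "aws.dynamodb.count"
	)
}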
-// Attributes for AWS Elastic Container Service (ECS).
-const (
- // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
- // semantic conventions. It represents the ID of a running ECS task. The ID
- // MUST be extracted from `task.arn`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
- // populated.)
- // Stability: experimental
- // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
- // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
- // running [ECS
- // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
- // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the family
- // name of the [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
- // used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for the task definition used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSTaskID returns an attribute KeyValue conforming to the
-// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
-// ECS task. The ID MUST be extracted from `task.arn`.
-func AWSECSTaskID(val string) attribute.KeyValue {
- return AWSECSTaskIDKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
-// [ECS
-// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the family name of
-// the [ECS task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
-// used to create the ECS task.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// the task definition used to create the ECS task.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Attributes for AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Attributes for AWS Logs.
-const (
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each writes to its own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// Attributes for AWS Lambda.
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (the `Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` request, where applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` request, where applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for AWS S3.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// The web browser attributes
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// These attributes may be used to describe the client in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ClientAddressKey is the attribute Key conforming to the "client.address"
- // semantic conventions. It represents the client address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.address` SHOULD represent the client address
- // behind any intermediaries, for example proxies, if it's available.
- ClientAddressKey = attribute.Key("client.address")
-
- // ClientPortKey is the attribute Key conforming to the "client.port"
- // semantic conventions. It represents the client port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.port` SHOULD represent the client port behind
- // any intermediaries, for example proxies, if it's available.
- ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the
-// "client.address" semantic conventions. It represents the client address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func ClientAddress(val string) attribute.KeyValue {
- return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
- return ClientPortKey.Int(val)
-}
-
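The client.* helpers follow the same pattern; a hedged sketch of how server-side instrumentation might record the initiating peer, again assuming a span from the otel trace API (illustrative only, not part of the vendored file):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateClient records the connection-initiating peer on a server span.
func annotateClient(span trace.Span, addr string, port int) {
	span.SetAttributes(
		semconv.ClientAddress(addr), // "client.address"
		semconv.ClientPort(port),    // "client.port"
	)
}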
-// A cloud environment (e.g. GCP, Azure, AWS).
-const (
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the
- // availability zone where the resource is running. Cloud regions often
- // have multiple, isolated locations known as zones to increase
- // availability.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Apps
- CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Bare Metal Solution (BMS)
- CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running in.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
-// Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
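// Example (illustrative sketch; the semconv import path/version below is an
// assumption - use whichever version of this package your module pins):
// attaching the cloud.* helpers above to an SDK resource for an AWS Lambda
// function.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func lambdaResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSLambda,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAccountID("123456789012"),
		// Use the resolved function ARN, not an alias ARN (see the note above).
		semconv.CloudResourceID("arn:aws:lambda:us-east-1:123456789012:function:my-function"),
	)
}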
-// Attributes for CloudEvents.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // that uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // that identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // that contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// that uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// that identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// that contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
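// Example (illustrative sketch; the tracer name and semconv import path are
// assumptions): recording the cloudevents.* attributes above on the span that
// processes a CloudEvent.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func processCloudEvent(ctx context.Context, id, source, eventType string) {
	_, span := otel.Tracer("example").Start(ctx, "process CloudEvent")
	defer span.End()

	span.SetAttributes(
		semconv.CloudeventsEventID(id),
		semconv.CloudeventsEventSource(source),
		semconv.CloudeventsEventType(eventType),
		semconv.CloudeventsEventSpecVersion("1.0"),
	)
}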
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeStacktraceKey is the attribute Key conforming to the
- // "code.stacktrace" semantic conventions. It represents a stacktrace as a
- // string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'at
- // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
- return CodeStacktraceKey.String(val)
-}
-
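// Example (illustrative sketch; the helper name and semconv import path are
// assumptions): deriving the code.* attributes above from the Go runtime.
package example

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// codeAttributes returns code.filepath, code.lineno, and code.function for the
// caller `skip` frames up the stack (0 = the direct caller).
func codeAttributes(skip int) []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(skip + 1)
	if !ok {
		return nil
	}
	attrs := []attribute.KeyValue{
		semconv.CodeFilepath(file),
		semconv.CodeLineNumber(line),
	}
	if fn := runtime.FuncForPC(pc); fn != nil {
		// fn.Name() is fully qualified (package path + function); splitting it
		// into code.namespace and code.function is left out here for brevity.
		attrs = append(attrs, semconv.CodeFunction(fn.Name()))
	}
	return attrs
}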
-// A container instance.
-const (
- // ContainerCommandKey is the attribute Key conforming to the
- // "container.command" semantic conventions. It represents the command used
- // to run the container (i.e. the command name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol'
- // Note: If using embedded credentials or sensitive data, it is recommended
- // to remove them to prevent potential leakage.
- ContainerCommandKey = attribute.Key("container.command")
-
- // ContainerCommandArgsKey is the attribute Key conforming to the
- // "container.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) run by the
- // container. [2]
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol, --config, config.yaml'
- ContainerCommandArgsKey = attribute.Key("container.command_args")
-
- // ContainerCommandLineKey is the attribute Key conforming to the
- // "container.command_line" semantic conventions. It represents the full
- // command run by the container as a single string. [2]
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol --config config.yaml'
- ContainerCommandLineKey = attribute.Key("container.command_line")
-
- // ContainerCPUStateKey is the attribute Key conforming to the
- // "container.cpu.state" semantic conventions. It represents the CPU state
- // for this data point.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'user', 'kernel'
- ContainerCPUStateKey = attribute.Key("container.cpu.state")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerImageIDKey is the attribute Key conforming to the
- // "container.image.id" semantic conventions. It represents the runtime
- // specific image identifier. Usually a hash algorithm followed by a UUID.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
- // Note: Docker defines a sha256 of the image id; `container.image.id`
- // corresponds to the `Image` field from the Docker container inspect
- // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
- // endpoint.
- // K8S defines a link to the container registry repository with digest
- // `"imageID": "registry.azurecr.io
- // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
- // The ID is assigned by the container runtime and can vary in different
- // environments. Consider using `oci.manifest.digest` if it is important to
- // identify the same image in different environments/runtimes.
- ContainerImageIDKey = attribute.Key("container.image.id")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageRepoDigestsKey is the attribute Key conforming to the
- // "container.image.repo_digests" semantic conventions. It represents the
- // repo digests of the container image as provided by the container
- // runtime.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
- // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
- // Note:
- // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
- // and
- // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
- // report those under the `RepoDigests` field.
- ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
- // ContainerImageTagsKey is the attribute Key conforming to the
- // "container.image.tags" semantic conventions. It represents the container
- // image tags. An example can be found in [Docker Image
- // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
- // Should be only the `<tag>` section of the full name for example from
- // `registry.example.com/my-org/my-image:<tag>`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'v1.27.1', '3.5.7-0'
- ContainerImageTagsKey = attribute.Key("container.image.tags")
-
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-)
-
-var (
- // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
- ContainerCPUStateUser = ContainerCPUStateKey.String("user")
- // When CPU is used by the system (host OS)
- ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
- // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
- ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
-)
-
-// ContainerCommand returns an attribute KeyValue conforming to the
-// "container.command" semantic conventions. It represents the command used to
-// run the container (i.e. the command name).
-func ContainerCommand(val string) attribute.KeyValue {
- return ContainerCommandKey.String(val)
-}
-
-// ContainerCommandArgs returns an attribute KeyValue conforming to the
-// "container.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) run by the
-// container. [2]
-func ContainerCommandArgs(val ...string) attribute.KeyValue {
- return ContainerCommandArgsKey.StringSlice(val)
-}
-
-// ContainerCommandLine returns an attribute KeyValue conforming to the
-// "container.command_line" semantic conventions. It represents the full
-// command run by the container as a single string. [2]
-func ContainerCommandLine(val string) attribute.KeyValue {
- return ContainerCommandLineKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerImageID returns an attribute KeyValue conforming to the
-// "container.image.id" semantic conventions. It represents the runtime
-// specific image identifier. Usually a hash algorithm followed by a UUID.
-func ContainerImageID(val string) attribute.KeyValue {
- return ContainerImageIDKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
-// "container.image.repo_digests" semantic conventions. It represents the repo
-// digests of the container image as provided by the container runtime.
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
- return ContainerImageRepoDigestsKey.StringSlice(val)
-}
-
-// ContainerImageTags returns an attribute KeyValue conforming to the
-// "container.image.tags" semantic conventions. It represents the container
-// image tags. An example can be found in [Docker Image
-// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-// Should be only the `<tag>` section of the full name for example from
-// `registry.example.com/my-org/my-image:<tag>`.
-func ContainerImageTags(val ...string) attribute.KeyValue {
- return ContainerImageTagsKey.StringSlice(val)
-}
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
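// Example (illustrative sketch; the values and semconv import path are
// assumptions): describing a container with the container.* helpers above.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func containerResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerRuntime("containerd"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"),
	)
}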
-// This group defines the attributes used to describe telemetry in the context
-// of databases.
-const (
- // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
- // "db.client.connections.pool.name" semantic conventions. It represents
- // the name of the connection pool; unique within the instrumented
- // application. In case the connection pool implementation doesn't provide
- // a name, instrumentation should use a combination of `server.address` and
- // `server.port` attributes formatted as `server.address:server.port`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myDataSource'
- DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
-
- // DBClientConnectionsStateKey is the attribute Key conforming to the
- // "db.client.connections.state" semantic conventions. It represents the
- // state of a connection in the pool
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle'
- DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
-
- // DBCollectionNameKey is the attribute Key conforming to the
- // "db.collection.name" semantic conventions. It represents the name of a
- // collection (table, container) within the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'public.users', 'customers'
- // Note: If the collection name is parsed from the query, it SHOULD match
- // the value provided in the query and may be qualified with the schema and
- // database name.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBCollectionNameKey = attribute.Key("db.collection.name")
-
- // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
- // semantic conventions. It represents the name of the database, fully
- // qualified within the server address and port.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'customers', 'test.users'
- // Note: If a database system has multiple namespace components, they
- // SHOULD be concatenated (potentially using database system specific
- // conventions) from most general to most specific namespace component, and
- // more specific namespaces SHOULD NOT be captured without the more general
- // namespaces, to ensure that "startswith" queries for the more general
- // namespaces will be valid.
- // Semantic conventions for individual database systems SHOULD document
- // what `db.namespace` means in the context of that system.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBNamespaceKey = attribute.Key("db.namespace")
-
- // DBOperationNameKey is the attribute Key conforming to the
- // "db.operation.name" semantic conventions. It represents the name of the
- // operation or command being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: It is RECOMMENDED to capture the value as provided by the
- // application without attempting to do any case normalization.
- DBOperationNameKey = attribute.Key("db.operation.name")
-
- // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
- // semantic conventions. It represents the database query being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
- // "WuValue"'
- DBQueryTextKey = attribute.Key("db.query.text")
-
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents the database management system (DBMS) product
- // as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual DBMS may differ from the one identified by the client.
- // For example, when using PostgreSQL client libraries to connect to a
- // CockroachDB, the `db.system` is set to `postgresql` based on the
- // instrumentation's best knowledge.
- DBSystemKey = attribute.Key("db.system")
-)
-
-var (
- // idle
- DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
- // used
- DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
-// the "db.client.connections.pool.name" semantic conventions. It represents
-// the name of the connection pool; unique within the instrumented application.
-// In case the connection pool implementation doesn't provide a name,
-// instrumentation should use a combination of `server.address` and
-// `server.port` attributes formatted as `server.address:server.port`.
-func DBClientConnectionsPoolName(val string) attribute.KeyValue {
- return DBClientConnectionsPoolNameKey.String(val)
-}
-
-// DBCollectionName returns an attribute KeyValue conforming to the
-// "db.collection.name" semantic conventions. It represents the name of a
-// collection (table, container) within the database.
-func DBCollectionName(val string) attribute.KeyValue {
- return DBCollectionNameKey.String(val)
-}
-
-// DBNamespace returns an attribute KeyValue conforming to the
-// "db.namespace" semantic conventions. It represents the name of the database,
-// fully qualified within the server address and port.
-func DBNamespace(val string) attribute.KeyValue {
- return DBNamespaceKey.String(val)
-}
-
-// DBOperationName returns an attribute KeyValue conforming to the
-// "db.operation.name" semantic conventions. It represents the name of the
-// operation or command being executed.
-func DBOperationName(val string) attribute.KeyValue {
- return DBOperationNameKey.String(val)
-}
-
-// DBQueryText returns an attribute KeyValue conforming to the
-// "db.query.text" semantic conventions. It represents the database query being
-// executed.
-func DBQueryText(val string) attribute.KeyValue {
- return DBQueryTextKey.String(val)
-}
-
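// Example (illustrative sketch; the query, tracer name, and semconv import
// path are assumptions): a CLIENT span annotated with the db.* attributes
// above, capturing db.namespace, db.collection.name, and db.operation.name as
// provided by the application, per the notes above.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "SELECT customers.users",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemPostgreSQL,
		semconv.DBNamespace("customers"),
		semconv.DBCollectionName("public.users"),
		semconv.DBOperationName("SELECT"),
		semconv.DBQueryText("SELECT * FROM users WHERE id = $1"),
	)
}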
-// This group defines attributes for Cassandra.
-const (
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// This group defines attributes for Azure Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // cosmosDB Operation Type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
- // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
- // consumed for that operation
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// This group defines attributes for Elasticsearch.
-const (
- // DBElasticsearchClusterNameKey is the attribute Key conforming to the
- // "db.elasticsearch.cluster.name" semantic conventions. It represents the
- // identifier of an Elasticsearch cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
- DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
- // DBElasticsearchNodeNameKey is the attribute Key conforming to the
- // "db.elasticsearch.node.name" semantic conventions. It represents the
- // human-readable identifier of the node/instance to which a
- // request was routed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-0000000001'
- DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
- return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// human-readable identifier of the node/instance to which a
-// request was routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
- return DBElasticsearchNodeNameKey.String(val)
-}
-
-// Attributes for software deployments.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'staging', 'production'
- // Note: `deployment.environment` does not affect the uniqueness
- // constraints defined through
- // the `service.namespace`, `service.name` and `service.instance.id`
- // resource attributes.
- // This implies that resources carrying the following attribute
- // combinations MUST be
- // considered to be identifying the same service:
- //
- // * `service.name=frontend`, `deployment.environment=production`
- // * `service.name=frontend`, `deployment.environment=staging`.
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
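// Example (illustrative sketch; the semconv import path is an assumption): per
// the note above, these two resources identify the same service and differ
// only in deployment.environment.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

var (
	stagingRes = resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("frontend"),
		semconv.DeploymentEnvironment("staging"),
	)
	productionRes = resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("frontend"),
		semconv.DeploymentEnvironment("production"),
	)
)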
-// Attributes that represent an occurrence of a lifecycle transition on the
-// Android platform.
-const (
- // AndroidStateKey is the attribute Key conforming to the "android.state"
- // semantic conventions. This attribute is deprecated; use the
- // `device.app.lifecycle` event definition, including `android.state` as a
- // payload field, instead.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The Android lifecycle states are defined in [Activity lifecycle
- // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
- // and from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
- // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
- AndroidStateCreated = AndroidStateKey.String("created")
- // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
- AndroidStateBackground = AndroidStateKey.String("background")
- // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
- AndroidStateForeground = AndroidStateKey.String("foreground")
-)
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // DestinationAddressKey is the attribute Key conforming to the
- // "destination.address" semantic conventions. It represents the
- // destination address - domain name if available without reverse DNS
- // lookup; otherwise, IP address or Unix domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the source side, and when communicating through
- // an intermediary, `destination.address` SHOULD represent the destination
- // address behind any intermediaries, for example proxies, if it's
- // available.
- DestinationAddressKey = attribute.Key("destination.address")
-
- // DestinationPortKey is the attribute Key conforming to the
- // "destination.port" semantic conventions. It represents the destination
- // port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
- return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number
-func DestinationPort(val int) attribute.KeyValue {
- return DestinationPortKey.Int(val)
-}
-
-// Describes device attributes.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine-readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human-readable version of
- // the device model rather than a machine-readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// These attributes may be used for any disk related operation.
-const (
- // DiskIoDirectionKey is the attribute Key conforming to the
- // "disk.io.direction" semantic conventions. It represents the disk IO
- // operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read'
- DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
- // read
- DiskIoDirectionRead = DiskIoDirectionKey.String("read")
- // write
- DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report a DNS query.
-const (
- // DNSQuestionNameKey is the attribute Key conforming to the
- // "dns.question.name" semantic conventions. It represents the name being
- // queried.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.example.com', 'opentelemetry.io'
- // Note: If the name field contains non-printable characters (below 32 or
- // above 126), those characters should be represented as escaped base 10
- // integers (\DDD). Back slashes and quotes should be escaped. Tabs,
- // carriage returns, and line feeds should be converted to \t, \r, and \n
- // respectively.
- DNSQuestionNameKey = attribute.Key("dns.question.name")
-)
-
-// DNSQuestionName returns an attribute KeyValue conforming to the
-// "dns.question.name" semantic conventions. It represents the name being
-// queried.
-func DNSQuestionName(val string) attribute.KeyValue {
- return DNSQuestionNameKey.String(val)
-}
-
-// Attributes for operations with an authenticated and/or authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under, extracted from the token or
-// application security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses, extracted from the token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// The shared attributes used to report an error.
-const (
- // ErrorTypeKey is the attribute Key conforming to the "error.type"
-	// semantic conventions. It represents the class of error the operation
-	// ended with.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'timeout', 'java.net.UnknownHostException',
- // 'server_certificate_invalid', '500'
- // Note: The `error.type` SHOULD be predictable, and SHOULD have low
- // cardinality.
- //
- // When `error.type` is set to a type (e.g., an exception type), its
- // canonical class name identifying the type within the artifact SHOULD be
- // used.
- //
- // Instrumentations SHOULD document the list of errors they report.
- //
- // The cardinality of `error.type` within one instrumentation library
- // SHOULD be low.
- // Telemetry consumers that aggregate data from multiple instrumentation
- // libraries and applications
- // should be prepared for `error.type` to have high cardinality at query
- // time when no
- // additional filters are applied.
- //
- // If the operation has completed successfully, instrumentations SHOULD NOT
- // set `error.type`.
- //
- // If a specific domain defines its own set of error identifiers (such as
- // HTTP or gRPC status codes),
- // it's RECOMMENDED to:
- //
- // * Use a domain-specific attribute
- // * Set `error.type` to capture all errors, regardless of whether they are
- // defined within the domain-specific set or not.
- ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
- // A fallback error value to be used when the instrumentation doesn't define a custom value
- ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
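// Editor's addition (not part of the vendored file): a minimal, hypothetical
// sketch of applying the `error.type` guidance above when ending a span. It
// assumes the helpers in this package are in scope and that
// "go.opentelemetry.io/otel/codes" and "go.opentelemetry.io/otel/trace" are
// imported.
func endSpanWithError(span trace.Span, err error) {
	defer span.End()
	if err == nil {
		// Successful operations SHOULD NOT set error.type.
		return
	}
	span.SetStatus(codes.Error, err.Error())
	// Prefer a predictable, low-cardinality identifier (an HTTP status code,
	// an exception class name, and so on); otherwise fall back to _OTHER.
	span.SetAttributes(ErrorTypeOther)
}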
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
-	// semantic conventions. It identifies the class / type of event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'browser.mouse.click', 'device.app.lifecycle'
- // Note: Event names are subject to the same rules as [attribute
- // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
- // Notably, event names are namespaced to avoid collisions and provide a
- // clean separation of semantics for events in separate domains like
- // browser, mobile, and kubernetes.
- EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It SHOULD be set to true if
-	// the exception event is recorded at a point where it is known that the
-	// exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example for recording span
- // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
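// Editor's addition (not part of the vendored file): a hedged sketch of
// recording an escaping exception just before a span ends, as described in
// the exception.escaped notes above. It assumes "go.opentelemetry.io/otel/trace"
// is imported and that the SDK's RecordError fills exception.type and
// exception.message itself.
func recordEscapingException(span trace.Span, err error) {
	if err != nil {
		// Checked immediately before End, so the exception is known to
		// escape the span's scope.
		span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))
	}
	span.End()
}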
-// FaaS attributes
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
-	// "faas.document.operation" semantic conventions. It describes the type
-	// of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
-	// string that will potentially be reused for other invocations to the
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
-	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string that will potentially be reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
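// Editor's addition (not part of the vendored file): a hedged sketch of
// starting an invocation span annotated with the faas.* helpers above. The
// tracer name, span name, and argument values are illustrative only; assumes
// "context", "go.opentelemetry.io/otel", and "go.opentelemetry.io/otel/trace"
// are imported.
func startInvocationSpan(ctx context.Context, coldStart bool, invocationID string) (context.Context, trace.Span) {
	tracer := otel.Tracer("faas-example")
	return tracer.Start(ctx, "my-function",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			FaaSTriggerHTTP,
			FaaSColdstart(coldStart),
			FaaSInvocationID(invocationID),
		),
	)
}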
-// Attributes for Feature Flags.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
-	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
-	// identifier for a value. If one is unavailable, a stringified version of
-	// the value can be used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
-	// For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
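// Editor's addition (not part of the vendored file): a hedged sketch of
// recording a feature flag evaluation as a span event with the helpers above.
// The event name and parameters are illustrative; assumes
// "go.opentelemetry.io/otel/trace" is imported.
func recordFlagEvaluation(span trace.Span, key, variant, provider string) {
	span.AddEvent("feature_flag", trace.WithAttributes(
		FeatureFlagKey(key),
		FeatureFlagVariant(variant),
		FeatureFlagProviderName(provider),
	))
}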
-// Describes file attributes.
-const (
- // FileDirectoryKey is the attribute Key conforming to the "file.directory"
- // semantic conventions. It represents the directory where the file is
- // located. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/user', 'C:\\Program Files\\MyApp'
- FileDirectoryKey = attribute.Key("file.directory")
-
- // FileExtensionKey is the attribute Key conforming to the "file.extension"
- // semantic conventions. It represents the file extension, excluding the
- // leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: When the file name has multiple extensions (example.tar.gz), only
- // the last one should be captured ("gz", not "tar.gz").
- FileExtensionKey = attribute.Key("file.extension")
-
- // FileNameKey is the attribute Key conforming to the "file.name" semantic
- // conventions. It represents the name of the file including the extension,
- // without the directory.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.png'
- FileNameKey = attribute.Key("file.name")
-
- // FilePathKey is the attribute Key conforming to the "file.path" semantic
- // conventions. It represents the full path to the file, including the file
- // name. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/alice/example.png', 'C:\\Program
- // Files\\MyApp\\myapp.exe'
- FilePathKey = attribute.Key("file.path")
-
- // FileSizeKey is the attribute Key conforming to the "file.size" semantic
- // conventions. It represents the file size in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- FileSizeKey = attribute.Key("file.size")
-)
-
-// FileDirectory returns an attribute KeyValue conforming to the
-// "file.directory" semantic conventions. It represents the directory where the
-// file is located. It should include the drive letter, when appropriate.
-func FileDirectory(val string) attribute.KeyValue {
- return FileDirectoryKey.String(val)
-}
-
-// FileExtension returns an attribute KeyValue conforming to the
-// "file.extension" semantic conventions. It represents the file extension,
-// excluding the leading dot.
-func FileExtension(val string) attribute.KeyValue {
- return FileExtensionKey.String(val)
-}
-
-// FileName returns an attribute KeyValue conforming to the "file.name"
-// semantic conventions. It represents the name of the file including the
-// extension, without the directory.
-func FileName(val string) attribute.KeyValue {
- return FileNameKey.String(val)
-}
-
-// FilePath returns an attribute KeyValue conforming to the "file.path"
-// semantic conventions. It represents the full path to the file, including the
-// file name. It should include the drive letter, when appropriate.
-func FilePath(val string) attribute.KeyValue {
- return FilePathKey.String(val)
-}
-
-// FileSize returns an attribute KeyValue conforming to the "file.size"
-// semantic conventions. It represents the file size in bytes.
-func FileSize(val int) attribute.KeyValue {
- return FileSizeKey.Int(val)
-}
-
-// Attributes for Google Cloud Run.
-const (
- // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.execution" semantic conventions. It represents the
- // name of the Cloud Run
- // [execution](https://cloud.google.com/run/docs/managing/job-executions)
- // being run for the Job, as set by the
- // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'job-name-xxxx', 'sample-job-mdw84'
- GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
- // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
- // index for a task within an execution as provided by the
- // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1
- GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
- return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
- return GCPCloudRunJobTaskIndexKey.Int(val)
-}
-
-// Attributes for Google Compute Engine (GCE).
-const (
- // GCPGceInstanceHostnameKey is the attribute Key conforming to the
- // "gcp.gce.instance.hostname" semantic conventions. It represents the
- // hostname of a GCE instance. This is the full value of the default or
- // [custom
- // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-host1234.example.com',
- // 'sample-vm.us-west1-b.c.my-project.internal'
- GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
- // GCPGceInstanceNameKey is the attribute Key conforming to the
- // "gcp.gce.instance.name" semantic conventions. It represents the instance
- // name of a GCE instance. This is the value provided by `host.name`, the
- // visible name of the instance in the Cloud Console UI, and the prefix for
- // the default hostname of the instance as defined by the [default internal
- // DNS
- // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-1', 'my-vm-name'
- GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
- return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
- return GCPGceInstanceNameKey.String(val)
-}
-
-// The attributes used to describe telemetry in the context of LLM (Large
-// Language Models) requests and responses.
-const (
- // GenAiCompletionKey is the attribute Key conforming to the
- // "gen_ai.completion" semantic conventions. It represents the full
- // response received from the LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
- // Paris.'}]"
- // Note: It's RECOMMENDED to format completions as JSON string matching
- // [OpenAI messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiCompletionKey = attribute.Key("gen_ai.completion")
-
- // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
- // semantic conventions. It represents the full prompt sent to an LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'user', 'content': 'What is the capital of
- // France?'}]"
- // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI
- // messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiPromptKey = attribute.Key("gen_ai.prompt")
-
- // GenAiRequestMaxTokensKey is the attribute Key conforming to the
- // "gen_ai.request.max_tokens" semantic conventions. It represents the
- // maximum number of tokens the LLM generates for a request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
-
- // GenAiRequestModelKey is the attribute Key conforming to the
- // "gen_ai.request.model" semantic conventions. It represents the name of
- // the LLM a request is being made to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4'
- GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
-
- // GenAiRequestTemperatureKey is the attribute Key conforming to the
- // "gen_ai.request.temperature" semantic conventions. It represents the
- // temperature setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0.0
- GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
-
- // GenAiRequestTopPKey is the attribute Key conforming to the
- // "gen_ai.request.top_p" semantic conventions. It represents the top_p
- // sampling setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0
- GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
-
- // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
- // "gen_ai.response.finish_reasons" semantic conventions. It represents the
- // array of reasons the model stopped generating tokens, corresponding to
- // each generation received.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'stop'
- GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
-
- // GenAiResponseIDKey is the attribute Key conforming to the
- // "gen_ai.response.id" semantic conventions. It represents the unique
- // identifier for the completion.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'chatcmpl-123'
- GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
-
- // GenAiResponseModelKey is the attribute Key conforming to the
- // "gen_ai.response.model" semantic conventions. It represents the name of
- // the LLM a response was generated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4-0613'
- GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
-
- // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
- // semantic conventions. It represents the Generative AI product as
- // identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'openai'
- // Note: The actual GenAI product may differ from the one identified by the
- // client. For example, when using OpenAI client libraries to communicate
- // with Mistral, the `gen_ai.system` is set to `openai` based on the
- // instrumentation's best knowledge.
- GenAiSystemKey = attribute.Key("gen_ai.system")
-
- // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM response (completion).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 180
- GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
-
- // GenAiUsagePromptTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM prompt.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
-)
-
-var (
- // OpenAI
- GenAiSystemOpenai = GenAiSystemKey.String("openai")
-)
-
-// GenAiCompletion returns an attribute KeyValue conforming to the
-// "gen_ai.completion" semantic conventions. It represents the full response
-// received from the LLM.
-func GenAiCompletion(val string) attribute.KeyValue {
- return GenAiCompletionKey.String(val)
-}
-
-// GenAiPrompt returns an attribute KeyValue conforming to the
-// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
-// an LLM.
-func GenAiPrompt(val string) attribute.KeyValue {
- return GenAiPromptKey.String(val)
-}
-
-// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
-// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
-// number of tokens the LLM generates for a request.
-func GenAiRequestMaxTokens(val int) attribute.KeyValue {
- return GenAiRequestMaxTokensKey.Int(val)
-}
-
-// GenAiRequestModel returns an attribute KeyValue conforming to the
-// "gen_ai.request.model" semantic conventions. It represents the name of the
-// LLM a request is being made to.
-func GenAiRequestModel(val string) attribute.KeyValue {
- return GenAiRequestModelKey.String(val)
-}
-
-// GenAiRequestTemperature returns an attribute KeyValue conforming to the
-// "gen_ai.request.temperature" semantic conventions. It represents the
-// temperature setting for the LLM request.
-func GenAiRequestTemperature(val float64) attribute.KeyValue {
- return GenAiRequestTemperatureKey.Float64(val)
-}
-
-// GenAiRequestTopP returns an attribute KeyValue conforming to the
-// "gen_ai.request.top_p" semantic conventions. It represents the top_p
-// sampling setting for the LLM request.
-func GenAiRequestTopP(val float64) attribute.KeyValue {
- return GenAiRequestTopPKey.Float64(val)
-}
-
-// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
-// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
-// array of reasons the model stopped generating tokens, corresponding to each
-// generation received.
-func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
- return GenAiResponseFinishReasonsKey.StringSlice(val)
-}
-
-// GenAiResponseID returns an attribute KeyValue conforming to the
-// "gen_ai.response.id" semantic conventions. It represents the unique
-// identifier for the completion.
-func GenAiResponseID(val string) attribute.KeyValue {
- return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
- return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
- return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
- return GenAiUsagePromptTokensKey.Int(val)
-}
-
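// Editor's addition (not part of the vendored file): a hedged sketch of
// annotating an LLM request span with the gen_ai.* helpers above. The model
// names and values are illustrative only; assumes
// "go.opentelemetry.io/otel/trace" is imported.
func annotateLLMSpan(span trace.Span, promptTokens, completionTokens int) {
	span.SetAttributes(
		GenAiSystemOpenai,
		GenAiRequestModel("gpt-4"),
		GenAiResponseModel("gpt-4-0613"),
		GenAiUsagePromptTokens(promptTokens),
		GenAiUsageCompletionTokens(completionTokens),
		GenAiResponseFinishReasons("stop"),
	)
}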
-// Attributes for GraphQL.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// Attributes for the Heroku platform on which the application is running.
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches, or disk arrays.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
-	// semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
- // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
- // addresses MUST be specified in the [RFC
- // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
- HostIPKey = attribute.Key("host.ip")
-
- // HostMacKey is the attribute Key conforming to the "host.mac" semantic
- // conventions. It represents the available MAC addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
- // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
- // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
- // as hyphen-separated octets in uppercase hexadecimal form from most to
- // least significant.
- HostMacKey = attribute.Key("host.mac")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
- return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
- return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
- return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
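As a rough illustration of how the host.* helpers above are typically consumed, here is a minimal sketch that attaches a few of them to an OpenTelemetry SDK resource. The semconv import path, the example values, and the program wiring are assumptions for illustration, not part of the vendored file being removed here.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path for this vendored copy
)

func main() {
	// Describe the machine producing the telemetry; the values are illustrative.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HostName("build-host-01"),
		semconv.HostArchAMD64,
		semconv.HostType("n1-standard-1"),
	)
	fmt.Println(res.Attributes())
}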
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
-	// "http.request.method" semantic conventions. It represents the HTTP
-	// request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
-	// (this list MUST be a full override of the default known methods; it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
-	// to be case-insensitive SHOULD populate a canonical equivalent.
-	// Tracing instrumentations that do so MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPRequestSizeKey is the attribute Key conforming to the
- // "http.request.size" semantic conventions. It represents the total size
- // of the request in bytes. This should be the total number of bytes sent
- // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
- // and HTTP/3), headers, and request body if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPRequestSizeKey = attribute.Key("http.request.size")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseSizeKey is the attribute Key conforming to the
- // "http.response.size" semantic conventions. It represents the total size
- // of the response in bytes. This should be the total number of bytes sent
- // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
- // HTTP/3), headers, and response body and trailers if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPResponseSizeKey = attribute.Key("http.response.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
- // active state
- HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
- // idle state
- HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
-)
-
-var (
- // CONNECT method
- HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
- // DELETE method
- HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
- // GET method
- HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
- // HEAD method
- HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
- // OPTIONS method
- HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
- // PATCH method
- HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
- // POST method
- HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
- // PUT method
- HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
- // TRACE method
- HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
- // Any HTTP method that the instrumentation has no prior knowledge of
- HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
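The `_OTHER` fallback rule documented for `http.request.method` above can be applied mechanically: match the method against the known set, otherwise record `_OTHER` and keep the raw value in `http.request.method_original`. A minimal sketch under that rule; the semconv import path, the tracer name, and the abbreviated known-method table are assumptions, not part of this package.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

// knownMethods is deliberately abbreviated; a real override would list every
// RFC 9110 method plus PATCH, or honor OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS.
var knownMethods = map[string]attribute.KeyValue{
	"GET":  semconv.HTTPRequestMethodGet,
	"POST": semconv.HTTPRequestMethodPost,
	"HEAD": semconv.HTTPRequestMethodHead,
}

func methodAttrs(method string) []attribute.KeyValue {
	if kv, ok := knownMethods[method]; ok { // exact, case-sensitive match
		return []attribute.KeyValue{kv}
	}
	// Unknown method: record _OTHER and preserve the original value.
	return []attribute.KeyValue{
		semconv.HTTPRequestMethodOther,
		semconv.HTTPRequestMethodOriginal(method),
	}
}

func main() {
	_, span := otel.Tracer("example/http").Start(context.Background(), "HTTP request")
	defer span.End()
	span.SetAttributes(methodAttrs("PURGE")...) // a method outside the known set
}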
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
- return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
- return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
- return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPRequestSize returns an attribute KeyValue conforming to the
-// "http.request.size" semantic conventions. It represents the total size of
-// the request in bytes. This should be the total number of bytes sent over the
-// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and request body if any.
-func HTTPRequestSize(val int) attribute.KeyValue {
- return HTTPRequestSizeKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
- return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseSize returns an attribute KeyValue conforming to the
-// "http.response.size" semantic conventions. It represents the total size of
-// the response in bytes. This should be the total number of bytes sent over
-// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and response body and trailers if any.
-func HTTPResponseSize(val int) attribute.KeyValue {
- return HTTPResponseSizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
- return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// Java Virtual machine related attributes.
-const (
- // JvmBufferPoolNameKey is the attribute Key conforming to the
- // "jvm.buffer.pool.name" semantic conventions. It represents the name of
- // the buffer pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mapped', 'direct'
- // Note: Pool names are generally obtained via
- // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
- JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-
- // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
- // semantic conventions. It represents the name of the garbage collector
- // action.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'end of minor GC', 'end of major GC'
- // Note: Garbage collector action is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
- JvmGcActionKey = attribute.Key("jvm.gc.action")
-
- // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
- // semantic conventions. It represents the name of the garbage collector.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Young Generation', 'G1 Old Generation'
- // Note: Garbage collector name is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
- JvmGcNameKey = attribute.Key("jvm.gc.name")
-
- // JvmMemoryPoolNameKey is the attribute Key conforming to the
- // "jvm.memory.pool.name" semantic conventions. It represents the name of
- // the memory pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
- // Note: Pool names are generally obtained via
- // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
- JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
- // JvmMemoryTypeKey is the attribute Key conforming to the
- // "jvm.memory.type" semantic conventions. It represents the type of
- // memory.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
- // JvmThreadDaemonKey is the attribute Key conforming to the
-	// "jvm.thread.daemon" semantic conventions. It represents whether the
-	// thread is a daemon or not.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
- // JvmThreadStateKey is the attribute Key conforming to the
- // "jvm.thread.state" semantic conventions. It represents the state of the
- // thread.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'runnable', 'blocked'
- JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
- // A thread that has not yet started is in this state
- JvmThreadStateNew = JvmThreadStateKey.String("new")
- // A thread executing in the Java virtual machine is in this state
- JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
- // A thread that is blocked waiting for a monitor lock is in this state
- JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
- // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
- JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
- // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
- JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
- // A thread that has exited is in this state
- JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
- return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
- return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents the whether the
-// thread is daemon or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
- return JvmThreadDaemonKey.Bool(val)
-}
-
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
- // K8SClusterUIDKey is the attribute Key conforming to the
- // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
- // the cluster, set to the UID of the `kube-system` namespace.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
- // Note: K8S doesn't have support for obtaining a cluster ID. If this is
- // ever
- // added, we will recommend collecting the `k8s.cluster.uid` through the
- // official APIs. In the meantime, we are able to use the `uid` of the
- // `kube-system` namespace as a proxy for cluster ID. Read on for the
- // rationale.
- //
- // Every object created in a K8S cluster is assigned a distinct UID. The
- // `kube-system` namespace is used by Kubernetes itself and will exist
- // for the lifetime of the cluster. Using the `uid` of the `kube-system`
- // namespace is a reasonable proxy for the K8S ClusterID as it will only
- // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
- // UUIDs as standardized by
- // [ISO/IEC 9834-8 and ITU-T
- // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
- // Which states:
- //
- // > If generated according to one of the mechanisms defined in Rec.
- // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
- // different from all other UUIDs generated before 3603 A.D., or is
- // extremely likely to be different (depending on the mechanism chosen).
- //
- // Therefore, UIDs between clusters should be extremely unlikely to
- // conflict.
- K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
-	// Container from the Pod specification; it must be unique within a Pod. The
-	// container runtime usually uses a different, globally unique name
-	// (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
- // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
- // conforming to the "k8s.container.status.last_terminated_reason" semantic
- // conventions. It represents the last terminated reason of the Container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Evicted', 'Error'
- K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
- return K8SClusterUIDKey.String(val)
-}
-
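The rationale documented above suggests using the `kube-system` namespace UID as a stand-in for a cluster ID. A hedged sketch of how that value might be looked up in-cluster with client-go and fed to `K8SClusterUID`; the client-go wiring, in-cluster config, and semconv import path are illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func main() {
	cfg, err := rest.InClusterConfig() // requires running inside a pod
	if err != nil {
		log.Fatal(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// The kube-system namespace UID serves as a pseudo cluster ID, per the note above.
	ns, err := client.CoreV1().Namespaces().Get(context.Background(), "kube-system", metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(semconv.K8SClusterUID(string(ns.UID)))
}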
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from the Pod specification; it must be unique within a Pod. The
-// container runtime usually uses a different, globally unique name
-// (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
-// conforming to the "k8s.container.status.last_terminated_reason" semantic
-// conventions. It represents the last terminated reason of the Container.
-func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
- return K8SContainerStatusLastTerminatedReasonKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// Log attributes
-const (
- // LogIostreamKey is the attribute Key conforming to the "log.iostream"
- // semantic conventions. It represents the stream associated with the log.
- // See below for a list of well-known values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
- // Logs from stdout stream
- LogIostreamStdout = LogIostreamKey.String("stdout")
- // Events from stderr stream
- LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// Attributes for a file to which log was emitted.
-const (
- // LogFileNameKey is the attribute Key conforming to the "log.file.name"
- // semantic conventions. It represents the basename of the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'audit.log'
- LogFileNameKey = attribute.Key("log.file.name")
-
- // LogFileNameResolvedKey is the attribute Key conforming to the
- // "log.file.name_resolved" semantic conventions. It represents the
- // basename of the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'uuid.log'
- LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
- // LogFilePathKey is the attribute Key conforming to the "log.file.path"
- // semantic conventions. It represents the full path to the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/log/mysql/audit.log'
- LogFilePathKey = attribute.Key("log.file.path")
-
- // LogFilePathResolvedKey is the attribute Key conforming to the
- // "log.file.path_resolved" semantic conventions. It represents the full
- // path to the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/lib/docker/uuid.log'
- LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
- return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
- return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
- return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
- return LogFilePathResolvedKey.String(val)
-}
-
-// The generic attributes that may be used in any Log Record.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means, that two
- // distinguishable log records MUST have different values.
- // The id MAY be an [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client.id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client.id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
-	// the broker doesn't have such a notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationPartitionIDKey is the attribute Key conforming to
- // the "messaging.destination.partition.id" semantic conventions. It
- // represents the identifier of the partition messages are sent to or
- // received from, unique within the `messaging.destination.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1'
- MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
- // to the "messaging.destination_publish.anonymous" semantic conventions.
- // It represents a boolean that is true if the publish message destination
- // is anonymous (could be unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
- // MessagingDestinationPublishNameKey is the attribute Key conforming to
- // the "messaging.destination_publish.name" semantic conventions. It
- // represents the name of the original destination the message was
- // published to
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: The name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
-	// the broker doesn't have such a notion, the original destination name
- // SHOULD uniquely identify the broker.
- MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
- // MessagingMessageBodySizeKey is the attribute Key conforming to the
- // "messaging.message.body.size" semantic conventions. It represents the
- // size of the message body in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1439
- // Note: This can refer to both the compressed or uncompressed body size.
- // If both sizes are known, the uncompressed
- // body size should be used.
- MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the conversation ID identifying the conversation to which the message
- // belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
- // "messaging.message.envelope.size" semantic conventions. It represents
- // the size of the message body and metadata in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2738
- // Note: This can refer to both the compressed or uncompressed size. If
- // both sizes are known, the uncompressed
- // size should be used.
- MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingOperationNameKey is the attribute Key conforming to the
- // "messaging.operation.name" semantic conventions. It represents the
- // system-specific name of the messaging operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack', 'nack', 'send'
- MessagingOperationNameKey = attribute.Key("messaging.operation.name")
-
- // MessagingOperationTypeKey is the attribute Key conforming to the
- // "messaging.operation.type" semantic conventions. It represents a string
- // identifying the type of the messaging operation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
-
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents the messaging
- // system as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual messaging system may differ from the one known by the
- // client. For example, when using Kafka client libraries to communicate
- // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
- // the instrumentation's best knowledge.
- MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
- // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
- MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
- // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
- MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
- // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
- MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
- // One or more messages are delivered to or processed by a consumer
- MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
- // One or more messages are settled
- MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
-)
-
-var (
- // Apache ActiveMQ
- MessagingSystemActivemq = MessagingSystemKey.String("activemq")
- // Amazon Simple Queue Service (SQS)
- MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
- // Azure Event Grid
- MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
- // Azure Event Hubs
- MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
- // Azure Service Bus
- MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
- // Google Cloud Pub/Sub
- MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
- // Java Message Service
- MessagingSystemJms = MessagingSystemKey.String("jms")
- // Apache Kafka
- MessagingSystemKafka = MessagingSystemKey.String("kafka")
- // RabbitMQ
- MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
- // Apache RocketMQ
- MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
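Per the note on `messaging.batch.message_count` above, the count is only set for operations that handle more than one message. A minimal sketch of that rule; the semconv import path, tracer name, destination, and message count are illustrative assumptions.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

func publishAttrs(messageCount int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingSystemKafka,
		semconv.MessagingOperationTypePublish,
		semconv.MessagingDestinationName("MyTopic"), // illustrative destination
	}
	if messageCount > 1 { // single-message operations omit the batch count
		attrs = append(attrs, semconv.MessagingBatchMessageCount(messageCount))
	}
	return attrs
}

func main() {
	_, span := otel.Tracer("example/messaging").Start(context.Background(), "publish MyTopic")
	defer span.End()
	span.SetAttributes(publishAttrs(3)...)
}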
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client.id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
- return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationPartitionID returns an attribute KeyValue conforming
-// to the "messaging.destination.partition.id" semantic conventions. It
-// represents the identifier of the partition messages are sent to or received
-// from, unique within the `messaging.destination.name`.
-func MessagingDestinationPartitionID(val string) attribute.KeyValue {
- return MessagingDestinationPartitionIDKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
- return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingMessageBodySize returns an attribute KeyValue conforming to the
-// "messaging.message.body.size" semantic conventions. It represents the size
-// of the message body in bytes.
-func MessagingMessageBodySize(val int) attribute.KeyValue {
- return MessagingMessageBodySizeKey.Int(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the conversation ID identifying the conversation to which the
-// message belongs, represented as a string. Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
-// the "messaging.message.envelope.size" semantic conventions. It represents
-// the size of the message body and metadata in bytes.
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
- return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingOperationName returns an attribute KeyValue conforming to the
-// "messaging.operation.name" semantic conventions. It represents the
-// system-specific name of the messaging operation.
-func MessagingOperationName(val string) attribute.KeyValue {
- return MessagingOperationNameKey.String(val)
-}
-
-// This group describes attributes specific to Apache Kafka.
-const (
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
-	// message key in Kafka; keys are used to group related messages so
- // they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
-	// Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message key in Kafka; keys are used to group related messages so they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
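The Kafka key rule above (the attribute MUST NOT be set when the key is `null`) maps naturally onto a conditional append. A small sketch, assuming the semconv import path and a nil-able key as exposed by a typical producer/consumer API; the values are illustrative.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version path
)

// kafkaMessageAttrs records the key only when one is present, per the note above.
func kafkaMessageAttrs(key []byte, offset int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.MessagingKafkaMessageOffset(offset),
	}
	if key != nil { // a null key must not produce the attribute
		attrs = append(attrs, semconv.MessagingKafkaMessageKey(string(key)))
	}
	return attrs
}

func main() {
	fmt.Println(kafkaMessageAttrs([]byte("myKey"), 42))
	fmt.Println(kafkaMessageAttrs(nil, 43))
}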
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
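A minimal usage sketch (not from the removed file): the four Kafka helpers above each wrap their attribute key with a correctly typed value, so consumer instrumentation can attach them in a single SetAttributes call. The import path, tracer name, and literal values below are assumptions for illustration only.

package example

import (
	"context"

	"go.opentelemetry.io/otel"

	semconv "example.com/internal/semconv" // hypothetical import path for this package
)

// recordKafkaDelivery annotates a consumer span with the Kafka-specific
// attributes defined above; all values are placeholders.
func recordKafkaDelivery(ctx context.Context) {
	_, span := otel.Tracer("kafka-consumer").Start(ctx, "orders process")
	defer span.End()

	span.SetAttributes(
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaMessageKey("myKey"), // leave unset when the key is null
		semconv.MessagingKafkaMessageOffset(42),
		semconv.MessagingKafkaMessageTombstone(false),
	)
}
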
-// This group describes attributes specific to RabbitMQ.
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-	// conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
- // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
-	// It represents the RabbitMQ message delivery tag.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 123
- MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
-// conventions. It represents the RabbitMQ message delivery tag.
-func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
- return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
-}
-
-// This group describes attributes specific to RocketMQ.
-const (
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
-	// the message group, which is essential for FIFO messages. Messages that
-	// belong to the same message group are always processed one by one within
-	// the same consumer group.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
-	// the key(s) of the message, another way to mark a message besides the
-	// message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
-	// namespace of RocketMQ resources; resources in different namespaces are
-	// independent of each other.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the message group, which is essential for FIFO messages. Messages that
-// belong to the same message group are always processed one by one within the
-// same consumer group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of the message, another way to mark a message besides the message
-// id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources; resources in different namespaces are
-// independent of each other.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
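Another hedged sketch: enum-valued keys such as messaging.rocketmq.consumption_model and messaging.rocketmq.message.type have no constructor helper, so the pre-built KeyValue variables above are attached directly alongside the typed constructors. The import path and literal values are assumptions.

package example

import (
	"go.opentelemetry.io/otel/attribute"

	semconv "example.com/internal/semconv" // hypothetical import path for this package
)

// rocketmqAttrs mixes constructor helpers with pre-built enum values;
// the literals are placeholders.
func rocketmqAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqConsumptionModelClustering, // enum: used as-is
		semconv.MessagingRocketmqMessageTypeFifo,            // enum: used as-is
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
	}
}
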
-// This group describes attributes specific to GCP Pub/Sub.
-const (
- // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
- // It represents the ack deadline in seconds set for the modify ack
- // deadline request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
-
- // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
- // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
- // represents the ack id for a given message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack_id'
- MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
-
- // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
- // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
- // semantic conventions. It represents the delivery attempt for a given
- // message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
-
- // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
- // It represents the ordering key for a given message. If the attribute is
- // not present, the message does not have an ordering key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ordering_key'
- MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-)
-
-// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
-// conventions. It represents the ack deadline in seconds set for the modify
-// ack deadline request.
-func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
-// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
-// represents the ack id for a given message.
-func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckIDKey.String(val)
-}
-
-// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
-// conventions. It represents the delivery attempt for a given message.
-func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// This group describes attributes specific to Azure Service Bus.
-const (
- // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
- // conforming to the "messaging.servicebus.destination.subscription_name"
- // semantic conventions. It represents the name of the subscription in the
- // topic messages are received from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mySubscription'
- MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
-
- // MessagingServicebusDispositionStatusKey is the attribute Key conforming
- // to the "messaging.servicebus.disposition_status" semantic conventions.
-	// It describes the [settlement
-	// type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
-
- // MessagingServicebusMessageDeliveryCountKey is the attribute Key
- // conforming to the "messaging.servicebus.message.delivery_count" semantic
- // conventions. It represents the number of deliveries that have been
- // attempted for this message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
-
- // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
- // conforming to the "messaging.servicebus.message.enqueued_time" semantic
- // conventions. It represents the UTC epoch seconds at which the message
- // has been accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
-)
-
-var (
- // Message is completed
- MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
- // Message is abandoned
- MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
- // Message is sent to dead letter queue
- MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
- // Message is deferred
- MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
-)
-
-// MessagingServicebusDestinationSubscriptionName returns an attribute
-// KeyValue conforming to the
-// "messaging.servicebus.destination.subscription_name" semantic conventions.
-// It represents the name of the subscription in the topic messages are
-// received from.
-func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
- return MessagingServicebusDestinationSubscriptionNameKey.String(val)
-}
-
-// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.delivery_count" semantic
-// conventions. It represents the number of deliveries that have been attempted
-// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
- return MessagingServicebusMessageDeliveryCountKey.Int(val)
-}
-
-// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
-}
-
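A sketch for the Service Bus group, assuming the same hypothetical import path: the enqueued-time convention is UTC epoch seconds, so a time.Time has to be converted before it is recorded, and the disposition status is one of the pre-built enum values above.

package example

import (
	"time"

	"go.opentelemetry.io/otel/attribute"

	semconv "example.com/internal/semconv" // hypothetical import path for this package
)

// servicebusReceiveAttrs builds attributes for a settled message;
// the subscription name is a placeholder.
func servicebusReceiveAttrs(enqueued time.Time, deliveryCount int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
		semconv.MessagingServicebusDispositionStatusComplete, // enum: used as-is
		semconv.MessagingServicebusMessageDeliveryCount(deliveryCount),
		semconv.MessagingServicebusMessageEnqueuedTime(int(enqueued.Unix())), // UTC epoch seconds
	}
}
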
-// This group describes attributes specific to Azure Event Hubs.
-const (
- // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
- // the "messaging.eventhubs.consumer.group" semantic conventions. It
- // represents the name of the consumer group the event consumer is
- // associated with.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'indexer'
- MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
-
- // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
- // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
- // It represents the UTC epoch seconds at which the message has been
- // accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
-)
-
-// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
-// to the "messaging.eventhubs.consumer.group" semantic conventions. It
-// represents the name of the consumer group the event consumer is associated
-// with.
-func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
- return MessagingEventhubsConsumerGroupKey.String(val)
-}
-
-// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetworkCarrierIccKey is the attribute Key conforming to the
- // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
- // alpha-2 2-character country code associated with the mobile carrier
- // network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'DE'
- NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
- // NetworkCarrierMccKey is the attribute Key conforming to the
- // "network.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '310'
- NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
- // NetworkCarrierMncKey is the attribute Key conforming to the
- // "network.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '001'
- NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
- // NetworkCarrierNameKey is the attribute Key conforming to the
- // "network.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'sprint'
- NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
- // NetworkConnectionSubtypeKey is the attribute Key conforming to the
-	// "network.connection.subtype" semantic conventions. It describes more
-	// details regarding the connection.type. It may be the type of cell
-	// technology connection, but it could be used for describing details about
-	// a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'LTE'
- NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
- // NetworkConnectionTypeKey is the attribute Key conforming to the
- // "network.connection.type" semantic conventions. It represents the
- // internet connection type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'wifi'
- NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
- // NetworkIoDirectionKey is the attribute Key conforming to the
- // "network.io.direction" semantic conventions. It represents the network
- // IO operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'transmit'
- NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
- // NetworkLocalAddressKey is the attribute Key conforming to the
- // "network.local.address" semantic conventions. It represents the local
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkLocalAddressKey = attribute.Key("network.local.address")
-
- // NetworkLocalPortKey is the attribute Key conforming to the
- // "network.local.port" semantic conventions. It represents the local port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkLocalPortKey = attribute.Key("network.local.port")
-
- // NetworkPeerAddressKey is the attribute Key conforming to the
- // "network.peer.address" semantic conventions. It represents the peer
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
- // NetworkPeerPortKey is the attribute Key conforming to the
- // "network.peer.port" semantic conventions. It represents the peer port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkPeerPortKey = attribute.Key("network.peer.port")
-
- // NetworkProtocolNameKey is the attribute Key conforming to the
- // "network.protocol.name" semantic conventions. It represents the [OSI
- // application layer](https://osi-model.com/application-layer/) or non-OSI
- // equivalent.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkProtocolNameKey = attribute.Key("network.protocol.name")
-
- // NetworkProtocolVersionKey is the attribute Key conforming to the
- // "network.protocol.version" semantic conventions. It represents the
- // actual version of the protocol used for network communication.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.1', '2'
- // Note: If protocol version is subject to negotiation (for example using
- // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
- // SHOULD be set to the negotiated version. If the actual protocol version
- // is not known, this attribute SHOULD NOT be set.
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
- // NetworkTransportKey is the attribute Key conforming to the
- // "network.transport" semantic conventions. It represents the [OSI
- // transport layer](https://osi-model.com/transport-layer/) or
- // [inter-process communication
- // method](https://wikipedia.org/wiki/Inter-process_communication).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tcp', 'udp'
- // Note: The value SHOULD be normalized to lowercase.
- //
- // Consider always setting the transport when setting a port number, since
- // a port number is ambiguous without knowing the transport. For example
- // different processes could be listening on TCP port 12345 and UDP port
- // 12345.
- NetworkTransportKey = attribute.Key("network.transport")
-
- // NetworkTypeKey is the attribute Key conforming to the "network.type"
- // semantic conventions. It represents the [OSI network
- // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ipv4', 'ipv6'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
- // GPRS
- NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
- // EDGE
- NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
- // UMTS
- NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
- // CDMA
- NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
- // IDEN
- NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
- // EHRPD
- NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
- // GSM
- NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
- // wifi
- NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
- // wired
- NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
- // cell
- NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
- // unavailable
- NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
- // unknown
- NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
- // transmit
- NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
- // receive
- NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
- // TCP
- NetworkTransportTCP = NetworkTransportKey.String("tcp")
- // UDP
- NetworkTransportUDP = NetworkTransportKey.String("udp")
- // Named or anonymous pipe
- NetworkTransportPipe = NetworkTransportKey.String("pipe")
- // Unix domain socket
- NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
- // IPv4
- NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
- // IPv6
- NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
- return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
- return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
- return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
- return NetworkCarrierNameKey.String(val)
-}
-
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
- return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
- return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
- return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
- return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
- return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the actual
-// version of the protocol used for network communication.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
- return NetworkProtocolVersionKey.String(val)
-}
-
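A sketch illustrating the note on network.transport above: when a port is recorded, the transport should be recorded too so the port number is unambiguous. The import path and protocol values are assumptions.

package example

import (
	"go.opentelemetry.io/otel/attribute"

	semconv "example.com/internal/semconv" // hypothetical import path for this package
)

// tcpPeerAttrs describes an HTTP/1.1 connection to a TCP peer; pairing the
// port with the transport keeps the port unambiguous.
func tcpPeerAttrs(addr string, port int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.NetworkPeerAddress(addr),
		semconv.NetworkPeerPort(port),
		semconv.NetworkTransportTCP, // record the transport alongside the port
		semconv.NetworkTypeIpv4,
		semconv.NetworkProtocolName("http"),
		semconv.NetworkProtocolVersion("1.1"),
	}
}
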
-// An OCI image manifest.
-const (
- // OciManifestDigestKey is the attribute Key conforming to the
- // "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically, this is the
-	// digest by which the container image is known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
- // Note: Follows [OCI Image Manifest
- // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
- // and specifically the [Digest
- // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
- // An example can be found in [Example Image
- // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
- OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically, this is the digest by
-// which the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
- return OciManifestDigestKey.String(val)
-}
-
-// Attributes used by the OpenTracing Shim layer.
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span doesn't depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
- // semantic conventions. It represents the unique identifier for a
- // particular build or compilation of the operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
- OSBuildIDKey = attribute.Key("os.build_id")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
-	// be parsed) OS version information, as reported by, for example, the `ver`
-	// or `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OSTypeKey = attribute.Key("os.type")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
- return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, as reported by, for example,
-// the `ver` or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
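Since the OS attributes are resource-level, here is a sketch of folding them into an SDK resource; the schema URL is omitted, the values are placeholders, and the import path is assumed as before.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"

	semconv "example.com/internal/semconv" // hypothetical import path for this package
)

// osResource describes the host operating system; os.type uses a pre-built
// enum value while the remaining attributes use constructor helpers.
func osResource() *resource.Resource {
	return resource.NewWithAttributes(
		"", // schema URL omitted in this sketch
		semconv.OSTypeLinux,
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("18.04.1"),
		semconv.OSDescription("Ubuntu 18.04.1 LTS"),
	)
}
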
-// Attributes reserved for OpenTelemetry
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
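A sketch of how a non-OTLP exporter might map a span status onto these attributes while honoring the rule above that otel.status_code MUST NOT be set when the status is UNSET; the mapping function itself is an assumption, as is the import path.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"

	semconv "example.com/internal/semconv" // hypothetical import path for this package
)

// statusAttrs converts a span status into attributes, emitting nothing for
// an unset status and a description only when one is present.
func statusAttrs(code codes.Code, desc string) []attribute.KeyValue {
	switch code {
	case codes.Ok:
		return []attribute.KeyValue{semconv.OTelStatusCodeOk}
	case codes.Error:
		attrs := []attribute.KeyValue{semconv.OTelStatusCodeError}
		if desc != "" {
			attrs = append(attrs, semconv.OTelStatusDescription(desc))
		}
		return attrs
	default: // codes.Unset: no status attributes
		return nil
	}
}
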
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](/docs/resource/README.md#service) of the remote
- // service. SHOULD be equal to the actual `service.name` resource attribute
- // of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
-	// "process.command_line" semantic conventions. It represents the full
-	// command used to launch the process as a single string. On Windows, can
-	// be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessContextSwitchTypeKey is the attribute Key conforming to the
-	// "process.context_switch_type" semantic conventions. It specifies whether
-	// the context switches for this data point were voluntary or involuntary.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
-
- // ProcessCreationTimeKey is the attribute Key conforming to the
- // "process.creation.time" semantic conventions. It represents the date and
- // time the process was created, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:25:34.853Z'
- ProcessCreationTimeKey = attribute.Key("process.creation.time")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessExitCodeKey is the attribute Key conforming to the
- // "process.exit.code" semantic conventions. It represents the exit code of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 127
- ProcessExitCodeKey = attribute.Key("process.exit.code")
-
- // ProcessExitTimeKey is the attribute Key conforming to the
- // "process.exit.time" semantic conventions. It represents the date and
- // time the process exited, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:26:12.315Z'
- ProcessExitTimeKey = attribute.Key("process.exit.time")
-
- // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
- // "process.group_leader.pid" semantic conventions. It represents the PID
- // of the process's group leader. This is also the process group ID (PGID)
- // of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 23
- ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
-
- // ProcessInteractiveKey is the attribute Key conforming to the
-	// "process.interactive" semantic conventions. It represents whether the
-	// process is connected to an interactive shell.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessInteractiveKey = attribute.Key("process.interactive")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-
- // ProcessPagingFaultTypeKey is the attribute Key conforming to the
- // "process.paging.fault_type" semantic conventions. It represents the type
- // of page fault for this data point. Type `major` is for major/hard page
- // faults, and `minor` is for minor/soft page faults.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PPID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessRealUserIDKey is the attribute Key conforming to the
- // "process.real_user.id" semantic conventions. It represents the real user
- // ID (RUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000
- ProcessRealUserIDKey = attribute.Key("process.real_user.id")
-
- // ProcessRealUserNameKey is the attribute Key conforming to the
- // "process.real_user.name" semantic conventions. It represents the
- // username of the real user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessRealUserNameKey = attribute.Key("process.real_user.name")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessSavedUserIDKey is the attribute Key conforming to the
- // "process.saved_user.id" semantic conventions. It represents the saved
- // user ID (SUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1002
- ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
-
- // ProcessSavedUserNameKey is the attribute Key conforming to the
- // "process.saved_user.name" semantic conventions. It represents the
- // username of the saved user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
- // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
- // "process.session_leader.pid" semantic conventions. It represents the PID
- // of the process's session leader. This is also the session ID (SID) of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 14
- ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
- // ProcessUserIDKey is the attribute Key conforming to the
- // "process.user.id" semantic conventions. It represents the effective user
- // ID (EUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1001
- ProcessUserIDKey = attribute.Key("process.user.id")
-
- // ProcessUserNameKey is the attribute Key conforming to the
- // "process.user.name" semantic conventions. It represents the username of
- // the effective user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessUserNameKey = attribute.Key("process.user.name")
-
- // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
- // semantic conventions. It represents the virtual process identifier.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12
- // Note: The process ID within a PID namespace. This is not necessarily
- // unique across all processes on the host but it is unique within the
- // process namespace that the process exists within.
- ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
- // voluntary
- ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
- // involuntary
- ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
- // major
- ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
- // minor
- ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
- return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
- return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
- return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
- return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents the whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
- return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
-func ProcessRealUserID(val int) attribute.KeyValue {
- return ProcessRealUserIDKey.Int(val)
-}
-
-// ProcessRealUserName returns an attribute KeyValue conforming to the
-// "process.real_user.name" semantic conventions. It represents the username of
-// the real user of the process.
-func ProcessRealUserName(val string) attribute.KeyValue {
- return ProcessRealUserNameKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessSavedUserID returns an attribute KeyValue conforming to the
-// "process.saved_user.id" semantic conventions. It represents the saved user
-// ID (SUID) of the process.
-func ProcessSavedUserID(val int) attribute.KeyValue {
- return ProcessSavedUserIDKey.Int(val)
-}
-
-// ProcessSavedUserName returns an attribute KeyValue conforming to the
-// "process.saved_user.name" semantic conventions. It represents the username
-// of the saved user.
-func ProcessSavedUserName(val string) attribute.KeyValue {
- return ProcessSavedUserNameKey.String(val)
-}
-
-// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
-// "process.session_leader.pid" semantic conventions. It represents the PID of
-// the process's session leader. This is also the session ID (SID) of the
-// process.
-func ProcessSessionLeaderPID(val int) attribute.KeyValue {
- return ProcessSessionLeaderPIDKey.Int(val)
-}
-
-// ProcessUserID returns an attribute KeyValue conforming to the
-// "process.user.id" semantic conventions. It represents the effective user ID
-// (EUID) of the process.
-func ProcessUserID(val int) attribute.KeyValue {
- return ProcessUserIDKey.Int(val)
-}
-
-// ProcessUserName returns an attribute KeyValue conforming to the
-// "process.user.name" semantic conventions. It represents the username of the
-// effective user of the process.
-func ProcessUserName(val string) attribute.KeyValue {
- return ProcessUserNameKey.String(val)
-}
-
-// ProcessVpid returns an attribute KeyValue conforming to the
-// "process.vpid" semantic conventions. It represents the virtual process
-// identifier.
-func ProcessVpid(val int) attribute.KeyValue {
- return ProcessVpidKey.Int(val)
-}
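-
-// Example (illustrative sketch, not part of the generated conventions):
-// populating a few process.* attributes from the Go standard library using
-// the helpers defined in this file; only standard-library calls are assumed.
-//
-//	exe, _ := os.Executable()
-//	attrs := []attribute.KeyValue{
-//		ProcessPID(os.Getpid()),
-//		ProcessParentPID(os.Getppid()),
-//		ProcessExecutablePath(exe),
-//		ProcessExecutableName(filepath.Base(exe)),
-//	}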
-
-// Attributes for process CPU
-const (
- // ProcessCPUStateKey is the attribute Key conforming to the
- // "process.cpu.state" semantic conventions. It represents the CPU state of
- // the process.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessCPUStateKey = attribute.Key("process.cpu.state")
-)
-
-var (
- // system
- ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
- // user
- ProcessCPUStateUser = ProcessCPUStateKey.String("user")
- // wait
- ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
-)
-
-// Attributes for remote procedure calls.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // doesn't specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCMessageCompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
- // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
-// semantic conventions. It represents a message identifier that MUST be
-// calculated as two different counters starting from `1`, one for sent
-// messages and one for received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- RPCMessageIDKey = attribute.Key("rpc.message.id")
-
- // RPCMessageTypeKey is the attribute Key conforming to the
- // "rpc.message.type" semantic conventions. It represents the whether this
- // is a received or sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
- // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
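-
-// Example (illustrative sketch): deriving rpc.grpc.status_code from a gRPC
-// result using google.golang.org/grpc/status. status.Code returns codes.OK
-// for a nil error, and codes.Code values are numerically aligned with the
-// enum above; span and err are assumed to be in scope.
-//
-//	code := status.Code(err)
-//	span.SetAttributes(RPCGRPCStatusCodeKey.Int(int(code)))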
-
-var (
- // sent
- RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
- // received
- RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
-// "rpc.message.compressed_size" semantic conventions. It represents the
-// compressed size of the message in bytes.
-func RPCMessageCompressedSize(val int) attribute.KeyValue {
- return RPCMessageCompressedSizeKey.Int(val)
-}
-
-// RPCMessageID returns an attribute KeyValue conforming to the
-// "rpc.message.id" semantic conventions. It represents the mUST be calculated
-// as two different counters starting from `1` one for sent messages and one
-// for received message.
-func RPCMessageID(val int) attribute.KeyValue {
- return RPCMessageIDKey.Int(val)
-}
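-
-// Example (illustrative sketch): rpc.message.id maintained as two independent
-// counters, one per direction, both starting at 1, recorded on span events
-// for a streaming RPC; span is an assumed trace.Span.
-//
-//	var sent, received int
-//	// for every outbound message:
-//	sent++
-//	span.AddEvent("message",
-//		trace.WithAttributes(RPCMessageTypeSent, RPCMessageID(sent)))
-//	// for every inbound message:
-//	received++
-//	span.AddEvent("message",
-//		trace.WithAttributes(RPCMessageTypeReceived, RPCMessageID(received)))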
-
-// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
-// the "rpc.message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func RPCMessageUncompressedSize(val int) attribute.KeyValue {
- return RPCMessageUncompressedSizeKey.Int(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ServerAddressKey is the attribute Key conforming to the "server.address"
- // semantic conventions. It represents the server domain name if available
- // without reverse DNS lookup; otherwise, IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.address` SHOULD represent the server address
- // behind any intermediaries, for example proxies, if it's available.
- ServerAddressKey = attribute.Key("server.address")
-
- // ServerPortKey is the attribute Key conforming to the "server.port"
- // semantic conventions. It represents the server port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.port` SHOULD represent the server port behind
- // any intermediaries, for example proxies, if it's available.
- ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
- return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
- return ServerPortKey.Int(val)
-}
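-
-// Example (illustrative sketch): splitting a target URL into server.address
-// and server.port with the standard library only; the URL is a placeholder.
-//
-//	u, _ := url.Parse("https://example.com:8443/api")
-//	attrs := []attribute.KeyValue{ServerAddress(u.Hostname())}
-//	if p, err := strconv.Atoi(u.Port()); err == nil {
-//		attrs = append(attrs, ServerPort(p))
-//	}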
-
-// A service instance.
-const (
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to
- // distinguish instances of the same service that exist at the same time
- // (e.g. instances of a horizontally scaled
- // service).
- //
- // Implementations, such as SDKs, are recommended to generate a random
- // Version 1 or Version 4 [RFC
- // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
- // inherent unique ID as the source of
- // this value if stability is desirable. In that case, the ID SHOULD be
- // used as source of a UUID Version 5 and
- // SHOULD use the following UUID as the namespace:
- // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
- //
- // UUIDs are typically recommended, as only an opaque value for the
- // purposes of identifying a service instance is
- // needed. Similar to what can be seen in the man page for the
- // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
- // file, the underlying
- // data, such as pod name and namespace should be treated as confidential,
- // being the user's choice to expose it
- // or not via another resource attribute.
- //
- // For applications running behind an application server (like unicorn), we
- // do not recommend using one identifier
- // for all processes participating in the application. Instead, it's
-// recommended that each division (e.g. a worker
-// thread in unicorn) have its own instance.id.
- //
- // It's not recommended for a Collector to set `service.instance.id` if it
- // can't unambiguously determine the
- // service instance that is generating that telemetry. For instance,
-// creating a UUID based on `pod.name` will
- // likely be wrong, as the Collector might not know from which container
- // within that pod the telemetry originated.
- // However, Collectors can set the `service.instance.id` if they can
- // unambiguously determine the service instance
- // for that telemetry. This is typically the case for scraping receivers,
- // as they know the target address and
- // port.
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
- // `process.executable.name` is not available, the value MUST be set to
- // `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation. The format is not defined by these
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0', 'a01dbef8a'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
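-
-// Example (illustrative sketch, assuming the github.com/google/uuid module):
-// deriving a stable service.instance.id as a Version 5 UUID in the namespace
-// recommended above; machineID is an assumed, previously read unique ID
-// (for example the contents of /etc/machine-id).
-//
-//	ns := uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")
-//	id := uuid.NewSHA1(ns, []byte(machineID)) // NewSHA1 yields a Version 5 UUID
-//	attr := ServiceInstanceID(id.String())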
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
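-
-// Example (illustrative sketch): the fallback naming rule described above,
-// applied when no service name was configured; configuredName is an assumed
-// value supplied by the application.
-//
-//	name := configuredName
-//	if name == "" {
-//		if exe, err := os.Executable(); err == nil {
-//			name = "unknown_service:" + filepath.Base(exe)
-//		} else {
-//			name = "unknown_service"
-//		}
-//	}
-//	attr := ServiceName(name)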
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
- // SessionIDKey is the attribute Key conforming to the "session.id"
- // semantic conventions. It represents a unique id to identify a session.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionIDKey = attribute.Key("session.id")
-
- // SessionPreviousIDKey is the attribute Key conforming to the
- // "session.previous_id" semantic conventions. It represents the previous
- // `session.id` for this user, when known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
- return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
- return SessionPreviousIDKey.String(val)
-}
-
-// SignalR attributes
-const (
- // SignalrConnectionStatusKey is the attribute Key conforming to the
- // "signalr.connection.status" semantic conventions. It represents the
-// SignalR HTTP connection closure status.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'app_shutdown', 'timeout'
- SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
- // SignalrTransportKey is the attribute Key conforming to the
- // "signalr.transport" semantic conventions. It represents the [SignalR
- // transport
- // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'web_sockets', 'long_polling'
- SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
- // The connection was closed normally
- SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
- // The connection was closed due to a timeout
- SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
- // The connection was closed because the app is shutting down
- SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
- // ServerSentEvents protocol
- SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
- // LongPolling protocol
- SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
- // WebSockets protocol
- SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // SourceAddressKey is the attribute Key conforming to the "source.address"
- // semantic conventions. It represents the source address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the destination side, and when communicating
- // through an intermediary, `source.address` SHOULD represent the source
- // address behind any intermediaries, for example proxies, if it's
- // available.
- SourceAddressKey = attribute.Key("source.address")
-
- // SourcePortKey is the attribute Key conforming to the "source.port"
- // semantic conventions. It represents the source port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
- return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number
-func SourcePort(val int) attribute.KeyValue {
- return SourcePortKey.Int(val)
-}
-
-// Describes System attributes
-const (
- // SystemDeviceKey is the attribute Key conforming to the "system.device"
- // semantic conventions. It represents the device identifier
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '(identifier)'
- SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
- return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU attributes
-const (
- // SystemCPULogicalNumberKey is the attribute Key conforming to the
- // "system.cpu.logical_number" semantic conventions. It represents the
- // logical CPU number [0..n-1]
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
- // SystemCPUStateKey is the attribute Key conforming to the
- // "system.cpu.state" semantic conventions. It represents the state of the
- // CPU
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle', 'interrupt'
- SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
- // user
- SystemCPUStateUser = SystemCPUStateKey.String("user")
- // system
- SystemCPUStateSystem = SystemCPUStateKey.String("system")
- // nice
- SystemCPUStateNice = SystemCPUStateKey.String("nice")
- // idle
- SystemCPUStateIdle = SystemCPUStateKey.String("idle")
- // iowait
- SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
- // interrupt
- SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
- // steal
- SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
- return SystemCPULogicalNumberKey.Int(val)
-}
-
-// Describes System Memory attributes
-const (
- // SystemMemoryStateKey is the attribute Key conforming to the
- // "system.memory.state" semantic conventions. It represents the memory
- // state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free', 'cached'
- SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
- // used
- SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
- // free
- SystemMemoryStateFree = SystemMemoryStateKey.String("free")
- // shared
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
- // buffers
- SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
- // cached
- SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
-// Describes System Memory Paging attributes
-const (
- // SystemPagingDirectionKey is the attribute Key conforming to the
- // "system.paging.direction" semantic conventions. It represents the paging
- // access direction
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'in'
- SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
- // SystemPagingStateKey is the attribute Key conforming to the
- // "system.paging.state" semantic conventions. It represents the memory
- // paging state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free'
- SystemPagingStateKey = attribute.Key("system.paging.state")
-
- // SystemPagingTypeKey is the attribute Key conforming to the
- // "system.paging.type" semantic conventions. It represents the memory
- // paging type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'minor'
- SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
- // in
- SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
- // out
- SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
- // used
- SystemPagingStateUsed = SystemPagingStateKey.String("used")
- // free
- SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
- // major
- SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
- // minor
- SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem attributes
-const (
- // SystemFilesystemModeKey is the attribute Key conforming to the
- // "system.filesystem.mode" semantic conventions. It represents the
- // filesystem mode
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'rw, ro'
- SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
- // SystemFilesystemMountpointKey is the attribute Key conforming to the
- // "system.filesystem.mountpoint" semantic conventions. It represents the
- // filesystem mount path
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/mnt/data'
- SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
- // SystemFilesystemStateKey is the attribute Key conforming to the
- // "system.filesystem.state" semantic conventions. It represents the
- // filesystem state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'used'
- SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
- // SystemFilesystemTypeKey is the attribute Key conforming to the
- // "system.filesystem.type" semantic conventions. It represents the
- // filesystem type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ext4'
- SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
- // used
- SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
- // free
- SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
- // reserved
- SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
- // fat32
- SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
- // exfat
- SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
- // ntfs
- SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
- // refs
- SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
- // hfsplus
- SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
- // ext4
- SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
- return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
- return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network attributes
-const (
- // SystemNetworkStateKey is the attribute Key conforming to the
- // "system.network.state" semantic conventions. It represents a stateless
- // protocol MUST NOT set this attribute
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'close_wait'
- SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
- // close
- SystemNetworkStateClose = SystemNetworkStateKey.String("close")
- // close_wait
- SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
- // closing
- SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
- // delete
- SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
- // established
- SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
- // fin_wait_1
- SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
- // fin_wait_2
- SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
- // last_ack
- SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
- // listen
- SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
- // syn_recv
- SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
- // syn_sent
- SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
- // time_wait
- SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process attributes
-const (
- // SystemProcessStatusKey is the attribute Key conforming to the
- // "system.process.status" semantic conventions. It represents the process
- // state, e.g., [Linux Process State
- // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'running'
- SystemProcessStatusKey = attribute.Key("system.process.status")
-)
-
-var (
- // running
- SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
- // sleeping
- SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
- // stopped
- SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
- // defunct
- SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
-)
-
-// Attributes for telemetry SDK.
-const (
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
- // to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is
- // used, this SDK MUST set the
- // `telemetry.sdk.name` attribute to the fully-qualified class or module
- // name of this SDK's main entry point
- // or another suitable identifier depending on the language.
- // The identifier `opentelemetry` is reserved and MUST NOT be used in this
- // case.
- // All custom identifiers SHOULD be stable across different versions of an
- // implementation.
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-
- // TelemetryDistroNameKey is the attribute Key conforming to the
- // "telemetry.distro.name" semantic conventions. It represents the name of
- // the auto instrumentation agent or distribution, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'parts-unlimited-java'
- // Note: Official auto instrumentation agents and distributions SHOULD set
- // the `telemetry.distro.name` attribute to
- // a string starting with `opentelemetry-`, e.g.
- // `opentelemetry-java-instrumentation`.
- TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
- // TelemetryDistroVersionKey is the attribute Key conforming to the
- // "telemetry.distro.version" semantic conventions. It represents the
- // version string of the auto instrumentation agent or distribution, if
- // used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2.3'
- TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // rust
- TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
- return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
- return TelemetryDistroVersionKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
- // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
- // semantic conventions. It represents the string indicating the
- // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
- // used during the current connection.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
- // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
- // Note: The values allowed for `tls.cipher` MUST be one of the
-// `Descriptions` of the [registered TLS Cipher
-// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
- TLSCipherKey = attribute.Key("tls.cipher")
-
- // TLSClientCertificateKey is the attribute Key conforming to the
- // "tls.client.certificate" semantic conventions. It represents the
-// PEM-encoded stand-alone certificate offered by the client. This is
- // usually mutually-exclusive of `client.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
- // TLSClientCertificateChainKey is the attribute Key conforming to the
- // "tls.client.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the client. This is usually mutually-exclusive of
- // `client.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
- // TLSClientHashMd5Key is the attribute Key conforming to the
- // "tls.client.hash.md5" semantic conventions. It represents the
-// certificate fingerprint using the MD5 digest of the DER-encoded version of
-// the certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
- // TLSClientHashSha1Key is the attribute Key conforming to the
- // "tls.client.hash.sha1" semantic conventions. It represents the
-// certificate fingerprint using the SHA1 digest of the DER-encoded version of
-// the certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
- // TLSClientHashSha256Key is the attribute Key conforming to the
- // "tls.client.hash.sha256" semantic conventions. It represents the
-// certificate fingerprint using the SHA256 digest of the DER-encoded version
-// of the certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
- // TLSClientIssuerKey is the attribute Key conforming to the
- // "tls.client.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
- // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
- // semantic conventions. It represents a hash that identifies clients based
- // on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
- // TLSClientNotAfterKey is the attribute Key conforming to the
- // "tls.client.not_after" semantic conventions. It represents the date/Time
- // indicating when client certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
- // TLSClientNotBeforeKey is the attribute Key conforming to the
- // "tls.client.not_before" semantic conventions. It represents the
-// date/time indicating when the client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the also
- // called an SNI, this tells the server which hostname to which the client
- // is attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions. It represents the numeric
- // part of the version parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2', '3'
- TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
- // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
- // semantic conventions. It represents the boolean flag indicating if this
- // TLS connection was resumed from an existing TLS negotiation.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSResumedKey = attribute.Key("tls.resumed")
-
- // TLSServerCertificateKey is the attribute Key conforming to the
- // "tls.server.certificate" semantic conventions. It represents the
-// PEM-encoded stand-alone certificate offered by the server. This is
- // usually mutually-exclusive of `server.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
- // TLSServerCertificateChainKey is the attribute Key conforming to the
- // "tls.server.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the server. This is usually mutually-exclusive of
- // `server.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
- // TLSServerHashMd5Key is the attribute Key conforming to the
- // "tls.server.hash.md5" semantic conventions. It represents the
-// certificate fingerprint using the MD5 digest of the DER-encoded version of
-// the certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
- // TLSServerHashSha1Key is the attribute Key conforming to the
- // "tls.server.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
- // TLSServerHashSha256Key is the attribute Key conforming to the
- // "tls.server.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
- // TLSServerIssuerKey is the attribute Key conforming to the
- // "tls.server.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
-	// of the issuer of the x.509 certificate presented by the server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
- // TLSServerJa3sKey is the attribute Key conforming to the
- // "tls.server.ja3s" semantic conventions. It represents a hash that
- // identifies servers based on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
- // TLSServerNotAfterKey is the attribute Key conforming to the
-	// "tls.server.not_after" semantic conventions. It represents the date/time
-	// indicating when the server certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
- // TLSServerNotBeforeKey is the attribute Key conforming to the
-	// "tls.server.not_before" semantic conventions. It represents the date/time
-	// indicating when the server certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
- // TLSServerSubjectKey is the attribute Key conforming to the
- // "tls.server.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
- TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
- // ssl
- TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
- // tls
- TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
- return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
- return TLSClientCertificateKey.String(val)
-}
-
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
- return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
- return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
- return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
- return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
- return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
- return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/Time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
- return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/Time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
- return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the also called
-// an SNI, this tells the server which hostname to which the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
- return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
- return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
- return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable
-func TLSCurve(val string) attribute.KeyValue {
- return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
- return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
- return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
- return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
- return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
- return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually-exclusive of `server.certificate` since
-// that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue {
- return TLSServerCertificateChainKey.StringSlice(val)
-}
-
-// TLSServerHashMd5 returns an attribute KeyValue conforming to the
-// "tls.server.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashMd5(val string) attribute.KeyValue {
- return TLSServerHashMd5Key.String(val)
-}
-
-// TLSServerHashSha1 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha1(val string) attribute.KeyValue {
- return TLSServerHashSha1Key.String(val)
-}
-
-// TLSServerHashSha256 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha256(val string) attribute.KeyValue {
- return TLSServerHashSha256Key.String(val)
-}
-
-// TLSServerIssuer returns an attribute KeyValue conforming to the
-// "tls.server.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the server.
-func TLSServerIssuer(val string) attribute.KeyValue {
- return TLSServerIssuerKey.String(val)
-}
-
-// TLSServerJa3s returns an attribute KeyValue conforming to the
-// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
-// servers based on how they perform an SSL/TLS handshake.
-func TLSServerJa3s(val string) attribute.KeyValue {
- return TLSServerJa3sKey.String(val)
-}
-
-// TLSServerNotAfter returns an attribute KeyValue conforming to the
-// "tls.server.not_after" semantic conventions. It represents the date/Time
-// indicating when server certificate is no longer considered valid.
-func TLSServerNotAfter(val string) attribute.KeyValue {
- return TLSServerNotAfterKey.String(val)
-}
-
-// TLSServerNotBefore returns an attribute KeyValue conforming to the
-// "tls.server.not_before" semantic conventions. It represents the date/Time
-// indicating when server certificate is first considered valid.
-func TLSServerNotBefore(val string) attribute.KeyValue {
- return TLSServerNotBeforeKey.String(val)
-}
-
-// TLSServerSubject returns an attribute KeyValue conforming to the
-// "tls.server.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the server.
-func TLSServerSubject(val string) attribute.KeyValue {
- return TLSServerSubjectKey.String(val)
-}
-
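For context on the helpers being removed above: they are plain constructors that return attribute.KeyValue values to attach to a span. A minimal sketch of typical usage follows; the function name, tracer name, and the assumption that the *tls.Conn has finished its handshake are illustrative only and are not part of this change. Only the semconv helpers deleted above and standard crypto/tls fields are used.

package example

import (
	"context"
	"crypto/tls"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// annotateTLS records connection-level tls.* attributes on a new span using
// the helper constructors deleted above. The *tls.Conn is assumed to have
// already completed its handshake.
func annotateTLS(ctx context.Context, conn *tls.Conn) {
	state := conn.ConnectionState()
	_, span := otel.Tracer("example").Start(ctx, "tls.connection")
	defer span.End()
	span.SetAttributes(
		semconv.TLSEstablished(state.HandshakeComplete),
		semconv.TLSResumed(state.DidResume),
		semconv.TLSCipher(tls.CipherSuiteName(state.CipherSuite)),
		semconv.TLSNextProtocol(state.NegotiatedProtocol),
		semconv.TLSClientServerName(state.ServerName),
	)
}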
-// Attributes describing URL.
-const (
- // URLDomainKey is the attribute Key conforming to the "url.domain"
- // semantic conventions. It represents the domain extracted from the
- // `url.full`, such as "opentelemetry.io".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
- // '[1080:0:0:0:8:800:200C:417A]'
- // Note: In some cases a URL may refer to an IP and/or port directly,
- // without a domain name. In this case, the IP address would go to the
- // domain field. If the URL contains a [literal IPv6
- // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
- // `[` and `]`, the `[` and `]` characters should also be captured in the
- // domain field.
- URLDomainKey = attribute.Key("url.domain")
-
- // URLExtensionKey is the attribute Key conforming to the "url.extension"
- // semantic conventions. It represents the file extension extracted from
- // the `url.full`, excluding the leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: The file extension is only set if it exists, as not every url has
- // a file extension. When the file name has multiple extensions
- // `example.tar.gz`, only the last one should be captured `gz`, not
- // `tar.gz`.
- URLExtensionKey = attribute.Key("url.extension")
-
- // URLFragmentKey is the attribute Key conforming to the "url.fragment"
- // semantic conventions. It represents the [URI
- // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'SemConv'
- URLFragmentKey = attribute.Key("url.fragment")
-
- // URLFullKey is the attribute Key conforming to the "url.full" semantic
- // conventions. It represents the absolute URL describing a network
- // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // '//localhost'
- // Note: For network calls, URL usually has
- // `scheme://host[:port][path][?query][#fragment]` format, where the
- // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
- // included nevertheless.
- // `url.full` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case username and
- // password SHOULD be redacted and attribute's value SHOULD be
- // `https://REDACTED:REDACTED@www.example.com/`.
- // `url.full` SHOULD capture the absolute URL when it is available (or can
- // be reconstructed). Sensitive content provided in `url.full` SHOULD be
- // scrubbed when instrumentations can identify it.
- URLFullKey = attribute.Key("url.full")
-
- // URLOriginalKey is the attribute Key conforming to the "url.original"
- // semantic conventions. It represents the unmodified original URL as seen
- // in the event source.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // 'search?q=OpenTelemetry'
- // Note: In network monitoring, the observed URL may be a full URL, whereas
- // in access logs, the URL is often just represented as a path. This field
- // is meant to represent the URL as it was observed, complete or not.
- // `url.original` might contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case password and
- // username SHOULD NOT be redacted and attribute's value SHOULD remain the
- // same.
- URLOriginalKey = attribute.Key("url.original")
-
- // URLPathKey is the attribute Key conforming to the "url.path" semantic
- // conventions. It represents the [URI
- // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/search'
- // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLPathKey = attribute.Key("url.path")
-
- // URLPortKey is the attribute Key conforming to the "url.port" semantic
- // conventions. It represents the port extracted from the `url.full`
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 443
- URLPortKey = attribute.Key("url.port")
-
- // URLQueryKey is the attribute Key conforming to the "url.query" semantic
- // conventions. It represents the [URI
- // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'q=OpenTelemetry'
- // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLQueryKey = attribute.Key("url.query")
-
- // URLRegisteredDomainKey is the attribute Key conforming to the
- // "url.registered_domain" semantic conventions. It represents the highest
- // registered url domain, stripped of the subdomain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.com', 'foo.co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org). For example, the registered domain for
- // `foo.example.com` is `example.com`. Trying to approximate this by simply
- // taking the last two labels will not work well for TLDs such as `co.uk`.
- URLRegisteredDomainKey = attribute.Key("url.registered_domain")
-
- // URLSchemeKey is the attribute Key conforming to the "url.scheme"
- // semantic conventions. It represents the [URI
- // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
- // identifying the used protocol.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https', 'ftp', 'telnet'
- URLSchemeKey = attribute.Key("url.scheme")
-
- // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
- // semantic conventions. It represents the subdomain portion of a fully
-	// qualified domain name, which includes all of the names except the host
-	// name under the registered_domain. In a partially qualified domain, or if
-	// the qualification level of the full name cannot be determined, subdomain
- // contains all of the names below the registered domain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'east', 'sub2.sub1'
- // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
- // the domain has multiple levels of subdomain, such as
- // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
- // with no trailing period.
- URLSubdomainKey = attribute.Key("url.subdomain")
-
- // URLTemplateKey is the attribute Key conforming to the "url.template"
- // semantic conventions. It represents the low-cardinality template of an
- // [absolute path
- // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
- URLTemplateKey = attribute.Key("url.template")
-
- // URLTopLevelDomainKey is the attribute Key conforming to the
- // "url.top_level_domain" semantic conventions. It represents the effective
-	// top level domain (eTLD), also known as the domain suffix, which is the last
- // part of the domain name. For example, the top level domain for
- // example.com is `com`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com', 'co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org).
- URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
-)
-
-// URLDomain returns an attribute KeyValue conforming to the "url.domain"
-// semantic conventions. It represents the domain extracted from the
-// `url.full`, such as "opentelemetry.io".
-func URLDomain(val string) attribute.KeyValue {
- return URLDomainKey.String(val)
-}
-
-// URLExtension returns an attribute KeyValue conforming to the
-// "url.extension" semantic conventions. It represents the file extension
-// extracted from the `url.full`, excluding the leading dot.
-func URLExtension(val string) attribute.KeyValue {
- return URLExtensionKey.String(val)
-}
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
- return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
- return URLFullKey.String(val)
-}
-
-// URLOriginal returns an attribute KeyValue conforming to the
-// "url.original" semantic conventions. It represents the unmodified original
-// URL as seen in the event source.
-func URLOriginal(val string) attribute.KeyValue {
- return URLOriginalKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
- return URLPathKey.String(val)
-}
-
-// URLPort returns an attribute KeyValue conforming to the "url.port"
-// semantic conventions. It represents the port extracted from the `url.full`
-func URLPort(val int) attribute.KeyValue {
- return URLPortKey.Int(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
- return URLQueryKey.String(val)
-}
-
-// URLRegisteredDomain returns an attribute KeyValue conforming to the
-// "url.registered_domain" semantic conventions. It represents the highest
-// registered url domain, stripped of the subdomain.
-func URLRegisteredDomain(val string) attribute.KeyValue {
- return URLRegisteredDomainKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions. It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
- return URLSchemeKey.String(val)
-}
-
-// URLSubdomain returns an attribute KeyValue conforming to the
-// "url.subdomain" semantic conventions. It represents the subdomain portion of
-// a fully qualified domain name, which includes all of the names except the
-// host name under the registered_domain. In a partially qualified domain, or
-// if the qualification level of the full name cannot be determined, subdomain
-// contains all of the names below the registered domain.
-func URLSubdomain(val string) attribute.KeyValue {
- return URLSubdomainKey.String(val)
-}
-
-// URLTemplate returns an attribute KeyValue conforming to the
-// "url.template" semantic conventions. It represents the low-cardinality
-// template of an [absolute path
-// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
-func URLTemplate(val string) attribute.KeyValue {
- return URLTemplateKey.String(val)
-}
-
-// URLTopLevelDomain returns an attribute KeyValue conforming to the
-// "url.top_level_domain" semantic conventions. It represents the effective top
-// level domain (eTLD), also known as the domain suffix, which is the last part of
-// the domain name. For example, the top level domain for example.com is `com`.
-func URLTopLevelDomain(val string) attribute.KeyValue {
- return URLTopLevelDomainKey.String(val)
-}
-
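The url.full note above requires credentials to be redacted before the attribute is recorded. A hedged sketch of that rule using the url.* helpers deleted above; the urlAttributes helper is hypothetical and not part of this change, and only net/url and the removed semconv constructors are assumed.

package example

import (
	"net/url"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// urlAttributes builds url.* attributes for a parsed URL. Credentials, if
// present, are replaced with REDACTED:REDACTED before url.full is populated,
// as the url.full note requires. The URL is taken by value so the caller's
// copy is not modified.
func urlAttributes(u url.URL) []attribute.KeyValue {
	if u.User != nil {
		u.User = url.UserPassword("REDACTED", "REDACTED")
	}
	return []attribute.KeyValue{
		semconv.URLFull(u.String()),
		semconv.URLScheme(u.Scheme),
		semconv.URLPath(u.Path),
		semconv.URLQuery(u.RawQuery),
	}
}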
-// Describes user-agent attributes.
-const (
- // UserAgentNameKey is the attribute Key conforming to the
- // "user_agent.name" semantic conventions. It represents the name of the
- // user-agent extracted from original. Usually refers to the browser's
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Safari', 'YourApp'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
- // from original string. In the case of using a user-agent for non-browser
- // products, such as microservices with multiple names/versions inside the
- // `user_agent.original`, the most significant name SHOULD be selected. In
- // such a scenario it should align with `user_agent.version`
- UserAgentNameKey = attribute.Key("user_agent.name")
-
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
- // grpc-java-okhttp/1.27.2'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-
- // UserAgentVersionKey is the attribute Key conforming to the
- // "user_agent.version" semantic conventions. It represents the version of
- // the user-agent extracted from original. Usually refers to the browser's
- // version
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.1.2', '1.0.0'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's
- // version from original string. In the case of using a user-agent for
- // non-browser products, such as microservices with multiple names/versions
- // inside the `user_agent.original`, the most significant version SHOULD be
- // selected. In such a scenario it should align with `user_agent.name`
- UserAgentVersionKey = attribute.Key("user_agent.version")
-)
-
-// UserAgentName returns an attribute KeyValue conforming to the
-// "user_agent.name" semantic conventions. It represents the name of the
-// user-agent extracted from original. Usually refers to the browser's name.
-func UserAgentName(val string) attribute.KeyValue {
- return UserAgentNameKey.String(val)
-}
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
-
-// UserAgentVersion returns an attribute KeyValue conforming to the
-// "user_agent.version" semantic conventions. It represents the version of the
-// user-agent extracted from original. Usually refers to the browser's version
-func UserAgentVersion(val string) attribute.KeyValue {
- return UserAgentVersionKey.String(val)
-}
-
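As a usage sketch for user_agent.original (illustrative only, not part of this change): server-side instrumentation commonly copies the User-Agent header onto the active span. The withUserAgent middleware and the assumption that upstream instrumentation already started a span are hypothetical.

package example

import (
	"net/http"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

// withUserAgent records user_agent.original on the span carried in the
// request context, if upstream instrumentation has started one.
func withUserAgent(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if ua := r.UserAgent(); ua != "" {
			span := trace.SpanFromContext(r.Context())
			span.SetAttributes(semconv.UserAgentOriginal(ua))
		}
		next.ServeHTTP(w, r)
	})
}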
-// The attributes used to describe the packaged software running the
-// application code.
-const (
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-)
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
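The webengine.* attributes are resource-level rather than span-level. A brief sketch of declaring them on an SDK resource; the function name is hypothetical and the values simply mirror the Examples in the deleted comments above.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

// webEngineResource describes the hosting application server as resource
// attributes; the values are taken from the examples in the removed comments.
func webEngineResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.WebEngineName("WildFly"),
		semconv.WebEngineVersion("21.0.0"),
		semconv.WebEngineDescription("WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final"),
	)
}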
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
deleted file mode 100644
index fcdb9f48..00000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
+++ /dev/null
@@ -1,1307 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
-
- // ContainerCPUTime is the metric conforming to the "container.cpu.time"
- // semantic conventions. It represents the total CPU time consumed.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ContainerCPUTimeName = "container.cpu.time"
- ContainerCPUTimeUnit = "s"
- ContainerCPUTimeDescription = "Total CPU time consumed"
-
- // ContainerMemoryUsage is the metric conforming to the
- // "container.memory.usage" semantic conventions. It represents the memory
- // usage of the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerMemoryUsageName = "container.memory.usage"
- ContainerMemoryUsageUnit = "By"
- ContainerMemoryUsageDescription = "Memory usage of the container."
-
- // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
- // conventions. It represents the disk bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerDiskIoName = "container.disk.io"
- ContainerDiskIoUnit = "By"
- ContainerDiskIoDescription = "Disk bytes for the container."
-
- // ContainerNetworkIo is the metric conforming to the "container.network.io"
- // semantic conventions. It represents the network bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerNetworkIoName = "container.network.io"
- ContainerNetworkIoUnit = "By"
- ContainerNetworkIoDescription = "Network bytes for the container."
-
- // DBClientOperationDuration is the metric conforming to the
- // "db.client.operation.duration" semantic conventions. It represents the
- // duration of database client operations.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientOperationDurationName = "db.client.operation.duration"
- DBClientOperationDurationUnit = "s"
- DBClientOperationDurationDescription = "Duration of database client operations."
-
- // DBClientConnectionCount is the metric conforming to the
- // "db.client.connection.count" semantic conventions. It represents the number
- // of connections that are currently in state described by the `state`
- // attribute.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionCountName = "db.client.connection.count"
- DBClientConnectionCountUnit = "{connection}"
- DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
-
- // DBClientConnectionIdleMax is the metric conforming to the
- // "db.client.connection.idle.max" semantic conventions. It represents the
- // maximum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
- DBClientConnectionIdleMaxUnit = "{connection}"
- DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
-
- // DBClientConnectionIdleMin is the metric conforming to the
- // "db.client.connection.idle.min" semantic conventions. It represents the
- // minimum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMinName = "db.client.connection.idle.min"
- DBClientConnectionIdleMinUnit = "{connection}"
- DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
-
- // DBClientConnectionMax is the metric conforming to the
- // "db.client.connection.max" semantic conventions. It represents the maximum
- // number of open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionMaxName = "db.client.connection.max"
- DBClientConnectionMaxUnit = "{connection}"
- DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
-
- // DBClientConnectionPendingRequests is the metric conforming to the
- // "db.client.connection.pending_requests" semantic conventions. It represents
- // the number of pending requests for an open connection, cumulative for the
- // entire pool.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
- DBClientConnectionPendingRequestsUnit = "{request}"
- DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
-
- // DBClientConnectionTimeouts is the metric conforming to the
- // "db.client.connection.timeouts" semantic conventions. It represents the
- // number of connection timeouts that have occurred trying to obtain a
- // connection from the pool.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
- DBClientConnectionTimeoutsUnit = "{timeout}"
- DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionCreateTime is the metric conforming to the
- // "db.client.connection.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionCreateTimeName = "db.client.connection.create_time"
- DBClientConnectionCreateTimeUnit = "s"
- DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionWaitTime is the metric conforming to the
- // "db.client.connection.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
- DBClientConnectionWaitTimeUnit = "s"
- DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionUseTime is the metric conforming to the
- // "db.client.connection.use_time" semantic conventions. It represents the time
- // between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionUseTimeName = "db.client.connection.use_time"
- DBClientConnectionUseTimeUnit = "s"
- DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // DBClientConnectionsUsage is the metric conforming to the
-	// "db.client.connections.usage" semantic conventions. It is deprecated; use
-	// `db.client.connection.count` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
-
- // DBClientConnectionsIdleMax is the metric conforming to the
-	// "db.client.connections.idle.max" semantic conventions. It is deprecated;
-	// use `db.client.connection.idle.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
-
- // DBClientConnectionsIdleMin is the metric conforming to the
-	// "db.client.connections.idle.min" semantic conventions. It is deprecated;
-	// use `db.client.connection.idle.min` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
-
- // DBClientConnectionsMax is the metric conforming to the
-	// "db.client.connections.max" semantic conventions. It is deprecated; use
-	// `db.client.connection.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
-	// "db.client.connections.pending_requests" semantic conventions. It is
-	// deprecated; use `db.client.connection.pending_requests` instead.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
-
- // DBClientConnectionsTimeouts is the metric conforming to the
-	// "db.client.connections.timeouts" semantic conventions. It is deprecated;
-	// use `db.client.connection.timeouts` instead.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
-
- // DBClientConnectionsCreateTime is the metric conforming to the
-	// "db.client.connections.create_time" semantic conventions. It is deprecated;
-	// use `db.client.connection.create_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsWaitTime is the metric conforming to the
-	// "db.client.connections.wait_time" semantic conventions. It is deprecated;
-	// use `db.client.connection.wait_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsUseTime is the metric conforming to the
-	// "db.client.connections.use_time" semantic conventions. It is deprecated;
-	// use `db.client.connection.use_time` instead. Note: the unit also
- // changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
-	// semantic conventions. It represents the time taken to perform a
- // DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
-
- // AspnetcoreRoutingMatchAttempts is the metric conforming to the
- // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
- // number of requests that were attempted to be matched to an endpoint.
- // Instrument: counter
- // Unit: {match_attempt}
- // Stability: Stable
- AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
- AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
- AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
-
- // AspnetcoreDiagnosticsExceptions is the metric conforming to the
- // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
- // number of exceptions caught by exception handling middleware.
- // Instrument: counter
- // Unit: {exception}
- // Stability: Stable
- AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
- AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
- AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
-
- // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
- // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
- // represents the number of requests that are currently active on the server
- // that hold a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
- AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
- AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
-
- // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
- // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
- // represents the duration of rate limiting lease held by requests on the
- // server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
- AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
- AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
-
- // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
- // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
- // represents the time the request spent in a queue waiting to acquire a rate
- // limiting lease.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
- AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
- AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
- // represents the number of requests that are currently queued, waiting to
- // acquire a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // KestrelActiveConnections is the metric conforming to the
- // "kestrel.active_connections" semantic conventions. It represents the number
- // of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelActiveConnectionsName = "kestrel.active_connections"
- KestrelActiveConnectionsUnit = "{connection}"
- KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // KestrelConnectionDuration is the metric conforming to the
- // "kestrel.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelConnectionDurationName = "kestrel.connection.duration"
- KestrelConnectionDurationUnit = "s"
- KestrelConnectionDurationDescription = "The duration of connections on the server."
-
- // KestrelRejectedConnections is the metric conforming to the
- // "kestrel.rejected_connections" semantic conventions. It represents the
- // number of connections rejected by the server.
- // Instrument: counter
- // Unit: {connection}
- // Stability: Stable
- KestrelRejectedConnectionsName = "kestrel.rejected_connections"
- KestrelRejectedConnectionsUnit = "{connection}"
- KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
-	// number of connections that are currently upgraded (WebSockets).
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Stable
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
-	// semantic conventions. It represents the duration of the
- // function's logic execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
-	// semantic conventions. It represents the duration of the
- // function's initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSNetIoName = "faas.net_io"
- FaaSNetIoUnit = "By"
- FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
-
- // HTTPServerRequestDuration is the metric conforming to the
- // "http.server.request.duration" semantic conventions. It represents the
- // duration of HTTP server requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPServerRequestDurationName = "http.server.request.duration"
- HTTPServerRequestDurationUnit = "s"
- HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
-
- // HTTPServerActiveRequests is the metric conforming to the
- // "http.server.active_requests" semantic conventions. It represents the number
- // of active HTTP server requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPServerActiveRequestsName = "http.server.active_requests"
- HTTPServerActiveRequestsUnit = "{request}"
- HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
-
- // HTTPServerRequestBodySize is the metric conforming to the
- // "http.server.request.body.size" semantic conventions. It represents the size
- // of HTTP server request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerRequestBodySizeName = "http.server.request.body.size"
- HTTPServerRequestBodySizeUnit = "By"
- HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
-
- // HTTPServerResponseBodySize is the metric conforming to the
- // "http.server.response.body.size" semantic conventions. It represents the
- // size of HTTP server response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerResponseBodySizeName = "http.server.response.body.size"
- HTTPServerResponseBodySizeUnit = "By"
- HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
-
- // HTTPClientRequestDuration is the metric conforming to the
- // "http.client.request.duration" semantic conventions. It represents the
- // duration of HTTP client requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPClientRequestDurationName = "http.client.request.duration"
- HTTPClientRequestDurationUnit = "s"
- HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
-
- // HTTPClientRequestBodySize is the metric conforming to the
- // "http.client.request.body.size" semantic conventions. It represents the size
- // of HTTP client request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientRequestBodySizeName = "http.client.request.body.size"
- HTTPClientRequestBodySizeUnit = "By"
- HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
-
- // HTTPClientResponseBodySize is the metric conforming to the
- // "http.client.response.body.size" semantic conventions. It represents the
- // size of HTTP client response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientResponseBodySizeName = "http.client.response.body.size"
- HTTPClientResponseBodySizeUnit = "By"
- HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
-
- // HTTPClientOpenConnections is the metric conforming to the
- // "http.client.open_connections" semantic conventions. It represents the
- // number of outbound HTTP connections that are currently active or idle on the
- // client.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- HTTPClientOpenConnectionsName = "http.client.open_connections"
- HTTPClientOpenConnectionsUnit = "{connection}"
- HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
- // HTTPClientConnectionDuration is the metric conforming to the
- // "http.client.connection.duration" semantic conventions. It represents the
- // duration of the successfully established outbound HTTP connections.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientConnectionDurationName = "http.client.connection.duration"
- HTTPClientConnectionDurationUnit = "s"
- HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
- // HTTPClientActiveRequests is the metric conforming to the
- // "http.client.active_requests" semantic conventions. It represents the number
- // of active HTTP requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPClientActiveRequestsName = "http.client.active_requests"
- HTTPClientActiveRequestsUnit = "{request}"
- HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
-
- // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
- // conventions. It represents the measure of initial memory requested.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmMemoryInitName = "jvm.memory.init"
- JvmMemoryInitUnit = "By"
- JvmMemoryInitDescription = "Measure of initial memory requested."
-
- // JvmSystemCPUUtilization is the metric conforming to the
- // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
- // CPU utilization for the whole system as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
- JvmSystemCPUUtilizationUnit = "1"
- JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
-
- // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
- // semantic conventions. It represents the average CPU load of the whole system
- // for the last minute as reported by the JVM.
- // Instrument: gauge
- // Unit: {run_queue_item}
- // Stability: Experimental
- JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
- JvmSystemCPULoad1mUnit = "{run_queue_item}"
- JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
-
- // JvmBufferMemoryUsage is the metric conforming to the
- // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
- // memory used by buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
- JvmBufferMemoryUsageUnit = "By"
- JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
-
- // JvmBufferMemoryLimit is the metric conforming to the
- // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
- // total memory capacity of buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
- JvmBufferMemoryLimitUnit = "By"
- JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
-
- // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
- // conventions. It represents the number of buffers in the pool.
- // Instrument: updowncounter
- // Unit: {buffer}
- // Stability: Experimental
- JvmBufferCountName = "jvm.buffer.count"
- JvmBufferCountUnit = "{buffer}"
- JvmBufferCountDescription = "Number of buffers in the pool."
-
- // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
- // conventions. It represents the measure of memory used.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedName = "jvm.memory.used"
- JvmMemoryUsedUnit = "By"
- JvmMemoryUsedDescription = "Measure of memory used."
-
- // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
- // semantic conventions. It represents the measure of memory committed.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryCommittedName = "jvm.memory.committed"
- JvmMemoryCommittedUnit = "By"
- JvmMemoryCommittedDescription = "Measure of memory committed."
-
- // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
- // conventions. It represents the measure of max obtainable memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the cPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It represents the
- // measures the duration of publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It represents the
- // measures the duration of receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingProcessDuration is the metric conforming to the
- // "messaging.process.duration" semantic conventions. It represents the
- // measures the duration of process operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingProcessDurationName = "messaging.process.duration"
- MessagingProcessDurationUnit = "s"
- MessagingProcessDurationDescription = "Measures the duration of process operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It represents the
- // measures the number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It represents the
- // measures the number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingProcessMessages is the metric conforming to the
- // "messaging.process.messages" semantic conventions. It represents the
- // measures the number of processed messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingProcessMessagesName = "messaging.process.messages"
- MessagingProcessMessagesUnit = "{message}"
- MessagingProcessMessagesDescription = "Measures the number of processed messages."
-
- // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
- // conventions. It represents the total CPU seconds broken down by different
- // states.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ProcessCPUTimeName = "process.cpu.time"
- ProcessCPUTimeUnit = "s"
- ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
-
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions. It represents the difference
- // in process.cpu.time since the last measurement, divided by the elapsed time
- // and number of CPUs available to the process.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- ProcessCPUUtilizationName = "process.cpu.utilization"
- ProcessCPUUtilizationUnit = "1"
- ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
-
- // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
- // semantic conventions. It represents the amount of physical memory in use.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryUsageName = "process.memory.usage"
- ProcessMemoryUsageUnit = "By"
- ProcessMemoryUsageDescription = "The amount of physical memory in use."
-
- // ProcessMemoryVirtual is the metric conforming to the
- // "process.memory.virtual" semantic conventions. It represents the amount of
- // committed virtual memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryVirtualName = "process.memory.virtual"
- ProcessMemoryVirtualUnit = "By"
- ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
-
- // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
- // conventions. It represents the disk bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessDiskIoName = "process.disk.io"
- ProcessDiskIoUnit = "By"
- ProcessDiskIoDescription = "Disk bytes transferred."
-
- // ProcessNetworkIo is the metric conforming to the "process.network.io"
- // semantic conventions. It represents the network bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessNetworkIoName = "process.network.io"
- ProcessNetworkIoUnit = "By"
- ProcessNetworkIoDescription = "Network bytes transferred."
-
- // ProcessThreadCount is the metric conforming to the "process.thread.count"
- // semantic conventions. It represents the process threads count.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Experimental
- ProcessThreadCountName = "process.thread.count"
- ProcessThreadCountUnit = "{thread}"
- ProcessThreadCountDescription = "Process threads count."
-
- // ProcessOpenFileDescriptorCount is the metric conforming to the
- // "process.open_file_descriptor.count" semantic conventions. It represents the
- // number of file descriptors in use by the process.
- // Instrument: updowncounter
- // Unit: {count}
- // Stability: Experimental
- ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
- ProcessOpenFileDescriptorCountUnit = "{count}"
- ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
-
- // ProcessContextSwitches is the metric conforming to the
- // "process.context_switches" semantic conventions. It represents the number of
- // times the process has been context switched.
- // Instrument: counter
- // Unit: {count}
- // Stability: Experimental
- ProcessContextSwitchesName = "process.context_switches"
- ProcessContextSwitchesUnit = "{count}"
- ProcessContextSwitchesDescription = "Number of times the process has been context switched."
-
- // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
- // semantic conventions. It represents the number of page faults the process
- // has made.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- ProcessPagingFaultsName = "process.paging.faults"
- ProcessPagingFaultsUnit = "{fault}"
- ProcessPagingFaultsDescription = "Number of page faults the process has made."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It represents the measures the duration of inbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It represents the measures the duration of outbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It represents the reports the current frequency of the
- // CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It represents the reports
- // the number of actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It represents the reports
- // the number of logical (virtual) processor cores created by the operating
- // system to manage multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It represents the reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryShared is the metric conforming to the "system.memory.shared"
- // semantic conventions. It represents the shared memory used (mostly by
- // tmpfs).
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemorySharedName = "system.memory.shared"
- SystemMemorySharedUnit = "By"
- SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the unix swap or windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskIoName = "system.disk.io"
- SystemDiskIoUnit = "By"
-
- // SystemDiskOperations is the metric conforming to the
- // "system.disk.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskOperationsName = "system.disk.operations"
- SystemDiskOperationsUnit = "{operation}"
-
- // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
- // semantic conventions. It represents the time disk spent activated.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskIoTimeName = "system.disk.io_time"
- SystemDiskIoTimeUnit = "s"
- SystemDiskIoTimeDescription = "Time disk spent activated"
-
- // SystemDiskOperationTime is the metric conforming to the
- // "system.disk.operation_time" semantic conventions. It represents the sum of
- // the time each operation took to complete.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskOperationTimeName = "system.disk.operation_time"
- SystemDiskOperationTimeUnit = "s"
- SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
-
- // SystemDiskMerged is the metric conforming to the "system.disk.merged"
- // semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskMergedName = "system.disk.merged"
- SystemDiskMergedUnit = "{operation}"
-
- // SystemFilesystemUsage is the metric conforming to the
- // "system.filesystem.usage" semantic conventions.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUsageName = "system.filesystem.usage"
- SystemFilesystemUsageUnit = "By"
-
- // SystemFilesystemUtilization is the metric conforming to the
- // "system.filesystem.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUtilizationName = "system.filesystem.utilization"
- SystemFilesystemUtilizationUnit = "1"
-
- // SystemNetworkDropped is the metric conforming to the
- // "system.network.dropped" semantic conventions. It represents the count of
- // packets that are dropped or discarded even though there was no error.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- SystemNetworkDroppedName = "system.network.dropped"
- SystemNetworkDroppedUnit = "{packet}"
- SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
-
- // SystemNetworkPackets is the metric conforming to the
- // "system.network.packets" semantic conventions.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkPacketsName = "system.network.packets"
- SystemNetworkPacketsUnit = "{packet}"
-
- // SystemNetworkErrors is the metric conforming to the "system.network.errors"
- // semantic conventions. It represents the count of network errors detected.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- SystemNetworkErrorsName = "system.network.errors"
- SystemNetworkErrorsUnit = "{error}"
- SystemNetworkErrorsDescription = "Count of network errors detected"
-
- // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkIoName = "system.network.io"
- SystemNetworkIoUnit = "By"
-
- // SystemNetworkConnections is the metric conforming to the
- // "system.network.connections" semantic conventions.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkConnectionsName = "system.network.connections"
- SystemNetworkConnectionsUnit = "{connection}"
-
- // SystemProcessCount is the metric conforming to the "system.process.count"
- // semantic conventions. It represents the total number of processes in each
- // state.
- // Instrument: updowncounter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCountName = "system.process.count"
- SystemProcessCountUnit = "{process}"
- SystemProcessCountDescription = "Total number of processes in each state"
-
- // SystemProcessCreated is the metric conforming to the
- // "system.process.created" semantic conventions. It represents the total
- // number of processes created over uptime of the host.
- // Instrument: counter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCreatedName = "system.process.created"
- SystemProcessCreatedUnit = "{process}"
- SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
-
- // SystemLinuxMemoryAvailable is the metric conforming to the
- // "system.linux.memory.available" semantic conventions. It represents an
- // estimate of how much memory is available for starting new applications,
- // without causing swapping.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemLinuxMemoryAvailableName = "system.linux.memory.available"
- SystemLinuxMemoryAvailableUnit = "By"
- SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
index 666bded4..267979c0 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
@@ -4,28 +4,53 @@
package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
import (
- "fmt"
"reflect"
"go.opentelemetry.io/otel/attribute"
)
// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+//
+// If err is nil, the returned attribute has the default value
+// [ErrorTypeOther].
+//
+// If err's type has the method
+//
+// ErrorType() string
+//
+// then the returned attribute has the value of err.ErrorType(). Otherwise, the
+// returned attribute has a value derived from the concrete type of err.
+//
+// The key of the returned attribute is [ErrorTypeKey].
func ErrorType(err error) attribute.KeyValue {
if err == nil {
return ErrorTypeOther
}
- t := reflect.TypeOf(err)
- var value string
- if t.PkgPath() == "" && t.Name() == "" {
- // Likely a builtin type.
- value = t.String()
- } else {
- value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+
+ return ErrorTypeKey.String(errorType(err))
+}
+
+func errorType(err error) string {
+ var s string
+ if et, ok := err.(interface{ ErrorType() string }); ok {
+ // Prioritize the ErrorType method if available.
+ s = et.ErrorType()
}
+ if s == "" {
+ // Fallback to reflection if the ErrorType method is not supported or
+ // returns an empty value.
- if value == "" {
- return ErrorTypeOther
+ t := reflect.TypeOf(err)
+ pkg, name := t.PkgPath(), t.Name()
+ if pkg != "" && name != "" {
+ s = pkg + "." + name
+ } else {
+ // The type has no package path or name (predeclared, not-defined,
+ // or alias for a not-defined type).
+ //
+ // This is not guaranteed to be unique, but is a best effort.
+ s = t.String()
+ }
}
- return ErrorTypeKey.String(value)
+ return s
}
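
A minimal sketch of what the new behaviour means for callers, assuming a caller-defined error type; the type name and attribute value below are hypothetical and not part of this change.

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

// quotaError is a hypothetical error that opts in to the new ErrorType hook.
type quotaError struct{ msg string }

func (e *quotaError) Error() string     { return e.msg }
func (e *quotaError) ErrorType() string { return "quota_exceeded" }

// errorAttr now yields "quota_exceeded" for *quotaError instead of the
// reflected type name; a nil err still maps to ErrorTypeOther.
func errorAttr(err error) attribute.KeyValue {
	return semconv.ErrorType(err)
}
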
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md
new file mode 100644
index 00000000..fed7013e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/MIGRATION.md
@@ -0,0 +1,78 @@
+
+# Migration from v1.38.0 to v1.39.0
+
+The `go.opentelemetry.io/otel/semconv/v1.39.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.38.0` with the following exceptions.
+
+## Removed
+
+The following declarations have been removed.
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
+
+If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
+If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
+
+- `LinuxMemorySlabStateKey`
+- `LinuxMemorySlabStateReclaimable`
+- `LinuxMemorySlabStateUnreclaimable`
+- `PeerService`
+- `PeerServiceKey`
+- `RPCConnectRPCErrorCodeAborted`
+- `RPCConnectRPCErrorCodeAlreadyExists`
+- `RPCConnectRPCErrorCodeCancelled`
+- `RPCConnectRPCErrorCodeDataLoss`
+- `RPCConnectRPCErrorCodeDeadlineExceeded`
+- `RPCConnectRPCErrorCodeFailedPrecondition`
+- `RPCConnectRPCErrorCodeInternal`
+- `RPCConnectRPCErrorCodeInvalidArgument`
+- `RPCConnectRPCErrorCodeKey`
+- `RPCConnectRPCErrorCodeNotFound`
+- `RPCConnectRPCErrorCodeOutOfRange`
+- `RPCConnectRPCErrorCodePermissionDenied`
+- `RPCConnectRPCErrorCodeResourceExhausted`
+- `RPCConnectRPCErrorCodeUnauthenticated`
+- `RPCConnectRPCErrorCodeUnavailable`
+- `RPCConnectRPCErrorCodeUnimplemented`
+- `RPCConnectRPCErrorCodeUnknown`
+- `RPCConnectRPCRequestMetadata`
+- `RPCConnectRPCResponseMetadata`
+- `RPCGRPCRequestMetadata`
+- `RPCGRPCResponseMetadata`
+- `RPCGRPCStatusCodeAborted`
+- `RPCGRPCStatusCodeAlreadyExists`
+- `RPCGRPCStatusCodeCancelled`
+- `RPCGRPCStatusCodeDataLoss`
+- `RPCGRPCStatusCodeDeadlineExceeded`
+- `RPCGRPCStatusCodeFailedPrecondition`
+- `RPCGRPCStatusCodeInternal`
+- `RPCGRPCStatusCodeInvalidArgument`
+- `RPCGRPCStatusCodeKey`
+- `RPCGRPCStatusCodeNotFound`
+- `RPCGRPCStatusCodeOk`
+- `RPCGRPCStatusCodeOutOfRange`
+- `RPCGRPCStatusCodePermissionDenied`
+- `RPCGRPCStatusCodeResourceExhausted`
+- `RPCGRPCStatusCodeUnauthenticated`
+- `RPCGRPCStatusCodeUnavailable`
+- `RPCGRPCStatusCodeUnimplemented`
+- `RPCGRPCStatusCodeUnknown`
+- `RPCJSONRPCErrorCode`
+- `RPCJSONRPCErrorCodeKey`
+- `RPCJSONRPCErrorMessage`
+- `RPCJSONRPCErrorMessageKey`
+- `RPCJSONRPCRequestID`
+- `RPCJSONRPCRequestIDKey`
+- `RPCJSONRPCVersion`
+- `RPCJSONRPCVersionKey`
+- `RPCService`
+- `RPCServiceKey`
+- `RPCSystemApacheDubbo`
+- `RPCSystemConnectRPC`
+- `RPCSystemDotnetWcf`
+- `RPCSystemGRPC`
+- `RPCSystemJSONRPC`
+- `RPCSystemJavaRmi`
+- `RPCSystemKey`
+- `RPCSystemOncRPC`
+
+[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
+[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue
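
For callers that still reference one of the removed declarations, one option (not prescribed by this change) is a small local shim built on the plain attribute API. The sketch below assumes the wire-level attribute names ("rpc.system", "peer.service") are still what the backend expects; adjust or drop it per the upstream deprecation guidance.

package semconvshim

import "go.opentelemetry.io/otel/attribute"

// Local stand-ins for identifiers removed in semconv v1.39.0 (assumed names).
var RPCSystemGRPC = attribute.String("rpc.system", "grpc")

func PeerService(val string) attribute.KeyValue {
	return attribute.String("peer.service", val)
}
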
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md
new file mode 100644
index 00000000..4b0e6f7f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.39.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.39.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.39.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go
new file mode 100644
index 00000000..080365fc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/attribute_group.go
@@ -0,0 +1,16239 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+ // AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "created"
+ // Note: The Android lifecycle states are defined in
+ // [Activity lifecycle callbacks], and from which the `OS identifiers` are
+ // derived.
+ //
+ // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+ AndroidAppStateKey = attribute.Key("android.app.state")
+
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version (`os.version`) of
+ // the android operating system. More information can be found in the
+ // [Android API levels documentation].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "33", "32"
+ //
+ // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found in the
+// [Android API levels documentation].
+//
+// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// Enum values for android.app.state
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called in the app for the first time.
+ //
+ // Stability: development
+ AndroidAppStateCreated = AndroidAppStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity,
+ // Context.stopService() has been called when the app was in the foreground
+ // state.
+ //
+ // Stability: development
+ AndroidAppStateBackground = AndroidAppStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called when the app was in either the created
+ // or background states.
+ //
+ // Stability: development
+ AndroidAppStateForeground = AndroidAppStateKey.String("foreground")
+)
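
As an illustration of how the android attributes above end up on telemetry, the sketch below builds a resource carrying the API level. semconv.SchemaURL and semconv.ServiceName come from elsewhere in this generated package, and the service name and API level are placeholder values.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)

// androidResource describes an Android app installation for the SDK.
func androidResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("mobile-shop"), // placeholder
		semconv.AndroidOSAPILevel("33"),    // placeholder
	)
}
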
+
+// Namespace: app
+const (
+ // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0",
+ // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123"
+ AppBuildIDKey = attribute.Key("app.build_id")
+
+ // AppInstallationIDKey is the attribute Key conforming to the
+ // "app.installation.id" semantic conventions. It represents a unique identifier
+ // representing the installation of an application on a specific device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092"
+ // Note: Its value SHOULD persist across launches of the same application
+ // installation, including through application upgrades.
+ // It SHOULD change if the application is uninstalled or if all applications of
+ // the vendor are uninstalled.
+ // Additionally, users might be able to reset this value (e.g. by clearing
+ // application data).
+ // If an app is installed multiple times on the same device (e.g. in different
+ // accounts on Android), each `app.installation.id` SHOULD have a different
+ // value.
+ // If multiple OpenTelemetry SDKs are used within the same application, they
+ // SHOULD use the same value for `app.installation.id`.
+ // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the
+ // `app.installation.id`.
+ //
+ // For iOS, this value SHOULD be equal to the [vendor identifier].
+ //
+ // For Android, examples of `app.installation.id` implementations include:
+ //
+ // - [Firebase Installation ID].
+ // - A globally unique UUID which is persisted across sessions in your
+ // application.
+ // - [App set ID].
+ // - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
+ //
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
+ //
+ // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
+ // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
+ // [App set ID]: https://developer.android.com/identity/app-set-id
+ // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+ AppInstallationIDKey = attribute.Key("app.installation.id")
+
+ // AppJankFrameCountKey is the attribute Key conforming to the
+ // "app.jank.frame_count" semantic conventions. It represents a number of frame
+ // renders that experienced jank.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 9, 42
+ // Note: Depending on platform limitations, the value provided MAY be an
+ // approximation.
+ AppJankFrameCountKey = attribute.Key("app.jank.frame_count")
+
+ // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period"
+ // semantic conventions. It represents the time period, in seconds, for which
+ // this jank is being reported.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 5.0, 10.24
+ AppJankPeriodKey = attribute.Key("app.jank.period")
+
+ // AppJankThresholdKey is the attribute Key conforming to the
+ // "app.jank.threshold" semantic conventions. It represents the minimum
+ // rendering threshold for this jank, in seconds.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.016, 0.7, 1.024
+ AppJankThresholdKey = attribute.Key("app.jank.threshold")
+
+ // AppScreenCoordinateXKey is the attribute Key conforming to the
+ // "app.screen.coordinate.x" semantic conventions. It represents the x
+ // (horizontal) coordinate of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 131
+ AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x")
+
+ // AppScreenCoordinateYKey is the attribute Key conforming to the
+ // "app.screen.coordinate.y" semantic conventions. It represents the y
+ // (vertical) component of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12, 99
+ AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y")
+
+ // AppScreenIDKey is the attribute Key conforming to the "app.screen.id"
+ // semantic conventions. It represents an identifier that uniquely
+ // differentiates this screen from other screens in the same application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3",
+ // "com.example.app.MainActivity", "com.example.shop.ProductDetailFragment",
+ // "MyApp.ProfileView", "MyApp.ProfileViewController"
+ // Note: A screen represents only the part of the device display drawn by the
+ // app. It typically contains multiple widgets or UI components and is larger in
+ // scope than individual widgets. Multiple screens can coexist on the same
+ // display simultaneously (e.g., split view on tablets).
+ AppScreenIDKey = attribute.Key("app.screen.id")
+
+ // AppScreenNameKey is the attribute Key conforming to the "app.screen.name"
+ // semantic conventions. It represents the name of an application screen.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MainActivity", "ProductDetailFragment", "ProfileView",
+ // "ProfileViewController"
+ // Note: A screen represents only the part of the device display drawn by the
+ // app. It typically contains multiple widgets or UI components and is larger in
+ // scope than individual widgets. Multiple screens can coexist on the same
+ // display simultaneously (e.g., split view on tablets).
+ AppScreenNameKey = attribute.Key("app.screen.name")
+
+ // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id"
+ // semantic conventions. It represents an identifier that uniquely
+ // differentiates this widget from other widgets in the same application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetIDKey = attribute.Key("app.widget.id")
+
+ // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name"
+ // semantic conventions. It represents the name of an application widget.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "submit", "attack", "Clear Cart"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetNameKey = attribute.Key("app.widget.name")
+)
+
+// AppBuildID returns an attribute KeyValue conforming to the "app.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the application.
+func AppBuildID(val string) attribute.KeyValue {
+ return AppBuildIDKey.String(val)
+}
+
+// AppInstallationID returns an attribute KeyValue conforming to the
+// "app.installation.id" semantic conventions. It represents a unique identifier
+// representing the installation of an application on a specific device.
+func AppInstallationID(val string) attribute.KeyValue {
+ return AppInstallationIDKey.String(val)
+}
+
+// AppJankFrameCount returns an attribute KeyValue conforming to the
+// "app.jank.frame_count" semantic conventions. It represents a number of frame
+// renders that experienced jank.
+func AppJankFrameCount(val int) attribute.KeyValue {
+ return AppJankFrameCountKey.Int(val)
+}
+
+// AppJankPeriod returns an attribute KeyValue conforming to the
+// "app.jank.period" semantic conventions. It represents the time period, in
+// seconds, for which this jank is being reported.
+func AppJankPeriod(val float64) attribute.KeyValue {
+ return AppJankPeriodKey.Float64(val)
+}
+
+// AppJankThreshold returns an attribute KeyValue conforming to the
+// "app.jank.threshold" semantic conventions. It represents the minimum rendering
+// threshold for this jank, in seconds.
+func AppJankThreshold(val float64) attribute.KeyValue {
+ return AppJankThresholdKey.Float64(val)
+}
+
+// AppScreenCoordinateX returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.x" semantic conventions. It represents the x
+// (horizontal) coordinate of a screen coordinate, in screen pixels.
+func AppScreenCoordinateX(val int) attribute.KeyValue {
+ return AppScreenCoordinateXKey.Int(val)
+}
+
+// AppScreenCoordinateY returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical)
+// component of a screen coordinate, in screen pixels.
+func AppScreenCoordinateY(val int) attribute.KeyValue {
+ return AppScreenCoordinateYKey.Int(val)
+}
+
+// AppScreenID returns an attribute KeyValue conforming to the "app.screen.id"
+// semantic conventions. It represents an identifier that uniquely differentiates
+// this screen from other screens in the same application.
+func AppScreenID(val string) attribute.KeyValue {
+ return AppScreenIDKey.String(val)
+}
+
+// AppScreenName returns an attribute KeyValue conforming to the
+// "app.screen.name" semantic conventions. It represents the name of an
+// application screen.
+func AppScreenName(val string) attribute.KeyValue {
+ return AppScreenNameKey.String(val)
+}
+
+// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id"
+// semantic conventions. It represents an identifier that uniquely differentiates
+// this widget from other widgets in the same application.
+func AppWidgetID(val string) attribute.KeyValue {
+ return AppWidgetIDKey.String(val)
+}
+
+// AppWidgetName returns an attribute KeyValue conforming to the
+// "app.widget.name" semantic conventions. It represents the name of an
+// application widget.
+func AppWidgetName(val string) attribute.KeyValue {
+ return AppWidgetNameKey.String(val)
+}
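
The screen and widget attributes above are typically attached to UI-interaction spans or events. A hedged sketch follows, assuming a configured global TracerProvider; the tracer name, span name, and attribute values are placeholders.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
)

// recordTap annotates a UI click span with the conventional app attributes.
func recordTap(ctx context.Context) {
	_, span := otel.Tracer("example/ui").Start(ctx, "app.widget.click")
	defer span.End()

	span.SetAttributes(
		semconv.AppScreenName("ProductDetailFragment"),
		semconv.AppWidgetName("submit"),
		semconv.AppScreenCoordinateX(131),
		semconv.AppScreenCoordinateY(99),
	)
}
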
+
+// Namespace: artifact
+const (
+ // ArtifactAttestationFilenameKey is the attribute Key conforming to the
+ // "artifact.attestation.filename" semantic conventions. It represents the
+ // provenance filename of the built attestation which directly relates to the
+ // build artifact filename. This filename SHOULD accompany the artifact at
+ // publish time. See the [SLSA Relationship] specification for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0.attestation",
+ // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation",
+ // "file-name-package.tar.gz.intoto.json1"
+ //
+ // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+ ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename")
+
+ // ArtifactAttestationHashKey is the attribute Key conforming to the
+ // "artifact.attestation.hash" semantic conventions. It represents the full
+ // [hash value (see glossary)], of the built attestation. Some envelopes in the
+ // [software attestation space] also refer to this as the **digest**.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408"
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+ ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash")
+
+ // ArtifactAttestationIDKey is the attribute Key conforming to the
+ // "artifact.attestation.id" semantic conventions. It represents the id of the
+ // build [software attestation].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ //
+ // [software attestation]: https://slsa.dev/attestation-model
+ ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id")
+
+ // ArtifactFilenameKey is the attribute Key conforming to the
+ // "artifact.filename" semantic conventions. It represents the human readable
+ // file name of the artifact, typically generated during build and release
+ // processes. Often includes the package name and version in the file name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0",
+ // "release-1.tar.gz", "file-name-package.tar.gz"
+ // Note: This file name can also act as the [Package Name]
+ // in cases where the package ecosystem maps accordingly.
+ // Additionally, the artifact [can be published]
+ // for others, but that is not a guarantee.
+ //
+ // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model
+ // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain
+ ArtifactFilenameKey = attribute.Key("artifact.filename")
+
+ // ArtifactHashKey is the attribute Key conforming to the "artifact.hash"
+ // semantic conventions. It represents the full [hash value (see glossary)],
+ // often found in checksum.txt on a release of the artifact and used to verify
+ // package integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"
+ // Note: The specific algorithm used to create the cryptographic hash value is
+ // not defined. In situations where an artifact has multiple
+ // cryptographic hashes, it is up to the implementer to choose which
+ // hash value to set here; this should be the most secure hash algorithm
+ // that is suitable for the situation and consistent with the
+ // corresponding attestation. The implementer can then provide the other
+ // hash values through an additional set of attribute extensions as they
+ // deem necessary.
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ ArtifactHashKey = attribute.Key("artifact.hash")
+
+ // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl"
+ // semantic conventions. It represents the [Package URL] of the
+ // [package artifact], which provides a standard way to identify and locate
+ // the packaged artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pkg:github/package-url/purl-spec@1209109710924",
+ // "pkg:npm/foo@12.12.3"
+ //
+ // [Package URL]: https://github.com/package-url/purl-spec
+ // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+ ArtifactPurlKey = attribute.Key("artifact.purl")
+
+ // ArtifactVersionKey is the attribute Key conforming to the "artifact.version"
+ // semantic conventions. It represents the version of the artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v0.1.0", "1.2.1", "122691-build"
+ ArtifactVersionKey = attribute.Key("artifact.version")
+)
+
+// ArtifactAttestationFilename returns an attribute KeyValue conforming to the
+// "artifact.attestation.filename" semantic conventions. It represents the
+// provenance filename of the built attestation which directly relates to the
+// build artifact filename. This filename SHOULD accompany the artifact at
+// publish time. See the [SLSA Relationship] specification for more information.
+//
+// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+func ArtifactAttestationFilename(val string) attribute.KeyValue {
+ return ArtifactAttestationFilenameKey.String(val)
+}
+
+// ArtifactAttestationHash returns an attribute KeyValue conforming to the
+// "artifact.attestation.hash" semantic conventions. It represents the full
+// [hash value (see glossary)] of the built attestation. Some envelopes in the
+// [software attestation space] also refer to this as the **digest**.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+func ArtifactAttestationHash(val string) attribute.KeyValue {
+ return ArtifactAttestationHashKey.String(val)
+}
+
+// ArtifactAttestationID returns an attribute KeyValue conforming to the
+// "artifact.attestation.id" semantic conventions. It represents the id of the
+// build [software attestation].
+//
+// [software attestation]: https://slsa.dev/attestation-model
+func ArtifactAttestationID(val string) attribute.KeyValue {
+ return ArtifactAttestationIDKey.String(val)
+}
+
+// ArtifactFilename returns an attribute KeyValue conforming to the
+// "artifact.filename" semantic conventions. It represents the human readable
+// file name of the artifact, typically generated during build and release
+// processes. Often includes the package name and version in the file name.
+func ArtifactFilename(val string) attribute.KeyValue {
+ return ArtifactFilenameKey.String(val)
+}
+
+// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash"
+// semantic conventions. It represents the full [hash value (see glossary)],
+// often found in checksum.txt on a release of the artifact and used to verify
+// package integrity.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+func ArtifactHash(val string) attribute.KeyValue {
+ return ArtifactHashKey.String(val)
+}
+
+// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl"
+// semantic conventions. It represents the [Package URL] of the
+// [package artifact] provides a standard way to identify and locate the packaged
+// artifact.
+//
+// [Package URL]: https://github.com/package-url/purl-spec
+// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+func ArtifactPurl(val string) attribute.KeyValue {
+ return ArtifactPurlKey.String(val)
+}
+
+// ArtifactVersion returns an attribute KeyValue conforming to the
+// "artifact.version" semantic conventions. It represents the version of the
+// artifact.
+func ArtifactVersion(val string) attribute.KeyValue {
+ return ArtifactVersionKey.String(val)
+}
+
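+// Example (illustrative sketch, not part of the generated definitions): the
+// artifact helpers above return plain attribute.KeyValue values, so they can be
+// attached to any span or resource. The span variable below is a hypothetical
+// trace.Span; the values come from the documented examples.
+//
+//	span.SetAttributes(
+//		ArtifactFilename("golang-binary-amd64-v0.1.0"),
+//		ArtifactVersion("v0.1.0"),
+//		ArtifactHash("9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"),
+//		ArtifactAttestationFilename("golang-binary-amd64-v0.1.0.attestation"),
+//	)
+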
+// Namespace: aws
+const (
+ // AWSBedrockGuardrailIDKey is the attribute Key conforming to the
+ // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+ // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+ // prevent unwanted behavior from model responses or user messages.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "sgi5gkybzqak"
+ //
+ // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+ AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id")
+
+ // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the
+ // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the
+ // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a
+ // bank of information that can be queried by models to generate more relevant
+ // responses and augment prompts.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "XFWUPB9PAW"
+ //
+ // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+ AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id")
+
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the
+ // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "AttributeName": "string", "AttributeType": "string" }"
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lives", "id"
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+ // of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }"
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of the
+ // `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+ // value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "CatsTable"
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }"
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `GlobalSecondaryIndexes`
+ // request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }"
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value of
+ // the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "name_to_group"
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the
+ // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+ // the JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }"
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of the
+ // `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }"
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value of
+ // the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems,
+ // ProductReviews"
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+ // parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+ // the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+ // the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of the
+ // `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of the
+ // `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ALL_ATTRIBUTES", "COUNT"
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the number of
+ // items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+ // the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "Cats"
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the value
+ // of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS cluster].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ //
+ // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container instance].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"
+ //
+ // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch type]
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn"
+ // semantic conventions. It represents the ARN of a running [ECS task].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ //
+ // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family name of
+ // the [ECS task definition] used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-family"
+ //
+ // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID MUST
+ // be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision for
+ // the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "8", "26"
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+
+ // AWSExtendedRequestIDKey is the attribute Key conforming to the
+ // "aws.extended_request_id" semantic conventions. It represents the AWS
+ // extended request ID as returned in the response header `x-amz-id-2`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+ AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+ // AWSKinesisStreamNameKey is the attribute Key conforming to the
+ // "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the
+ // `--stream-name` parameter of the Kinesis [describe-stream] operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-stream-name"
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+ AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (the
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the
+ // `/runtime/invocation/next` endpoint, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+ // Note: This may be different from `cloud.resource_id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+ // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+ // Lambda function. Its contents are read by Lambda and used to trigger a
+ // function. This isn't available in the Lambda execution context or the Lambda
+ // runtime environment. It is populated by the AWS SDK for each language when
+ // that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+ AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+ // Note: See the [log group ARN format documentation].
+ //
+ // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of the
+ // AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/aws/lambda/my-function", "opentelemetry-service"
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each
+ // writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+ // AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ // Note: See the [log stream ARN format documentation]. One log group can
+ // contain several log streams, so these ARNs necessarily identify both a log
+ // group and a log stream.
+ //
+ // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+ // AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in the
+ // response headers `x-amzn-requestid`, `x-amzn-request-id` or
+ // `x-amz-request-id`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request refers to.
+ // Corresponds to the `--bucket` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-bucket-name"
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source object
+ // (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [upload-part-copy]
+ //
+ //
+ // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean"
+ // Note: The `delete` attribute is only applicable to the [delete-object]
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3 API].
+ //
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `key` attribute is applicable to all object-related S3 operations,
+ // i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [delete-object]
+ // - [get-object]
+ // - [head-object]
+ // - [put-object]
+ // - [restore-object]
+ // - [select-object-content]
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [create-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html
+ // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html
+ // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html
+ // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html
+ // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number of
+ // the part being uploaded in a multipart-upload operation. This is a positive
+ // integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the [upload-part]
+ // and [upload-part-copy] operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter of
+ // the [upload-part operation within the S3 API].
+ //
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id"
+ // semantic conventions. It represents the upload ID that identifies the
+ // multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"
+ // Note: The `upload_id` attribute applies to S3 multipart-upload operations and
+ // corresponds to the `--upload-id` parameter
+ // of the [S3 API] multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the secret stored in AWS Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+ //
+ // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+ AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+ // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+ // semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+ // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+ // used to access the queue and perform actions on it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+ AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+ // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+ // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+ // of the AWS Step Functions Activity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+ AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+ // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+ // "aws.step_functions.state_machine.arn" semantic conventions. It represents
+ // the ARN of the AWS Step Functions State Machine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+ AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+// prevent unwanted behavior from model responses or user messages.
+//
+// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+func AWSBedrockGuardrailID(val string) attribute.KeyValue {
+ return AWSBedrockGuardrailIDKey.String(val)
+}
+
+// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the
+// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of
+// information that can be queried by models to generate more relevant responses
+// and augment prompts.
+//
+// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue {
+ return AWSBedrockKnowledgeBaseIDKey.String(val)
+}
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents
+// the JSON-serialized value of each item in the `AttributeDefinitions` request
+// field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the
+// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value
+// of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
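+// Example (illustrative sketch, not part of the generated definitions): the
+// variadic DynamoDB helpers above build string-slice attributes, one element
+// per item (for the JSON-serialized fields, each element is a single serialized
+// item). The span variable below is a hypothetical trace.Span.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users", "Cats"),
+//		AWSDynamoDBConsumedCapacity(`{ "TableName": "Users", "CapacityUnits": 1.0 }`),
+//	)
+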
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the
+// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+// value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of the
+// `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to
+// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+// the JSON-serialized value of the `ItemCollectionMetrics` response field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+// field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of the
+// `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming
+// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+// the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the
+// `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value of
+// the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+// [ECS cluster].
+//
+// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container instance].
+//
+// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS task].
+//
+// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task definition] used to create the ECS task.
+//
+// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id"
+// semantic conventions. It represents the ID of a running ECS task. The ID MUST
+// be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+ return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+// Lambda function. Its contents are read by Lambda and used to trigger a
+// function. This isn't available in the Lambda execution context or the Lambda
+// runtime environment. It is populated by the AWS SDK for each language when
+// that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or
+// `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
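+// Example (illustrative sketch, not part of the generated definitions): a span
+// describing an S3 upload-part call could combine the bucket, key, upload ID
+// and part number helpers above. The span variable below is a hypothetical
+// trace.Span; the values come from the documented examples.
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		AWSS3PartNumber(3456),
+//	)
+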
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the secret stored in AWS Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS
+// Topic. An Amazon SNS [topic] is a logical access point that acts as a
+// communication channel.
+//
+// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+func AWSSNSTopicARN(val string) attribute.KeyValue {
+ return AWSSNSTopicARNKey.String(val)
+}
+
+// AWSSQSQueueURL returns an attribute KeyValue conforming to the
+// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS
+// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service
+// (SQS) and is used to access the queue and perform actions on it.
+func AWSSQSQueueURL(val string) attribute.KeyValue {
+ return AWSSQSQueueURLKey.String(val)
+}
+
+// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the
+// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+// of the AWS Step Functions Activity.
+func AWSStepFunctionsActivityARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsActivityARNKey.String(val)
+}
+
+// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to
+// the "aws.step_functions.state_machine.arn" semantic conventions. It represents
+// the ARN of the AWS Step Functions State Machine.
+func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsStateMachineARNKey.String(val)
+}
+
+// Enum values for aws.ecs.launchtype
+var (
+ // Amazon EC2
+ // Stability: development
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // Amazon Fargate
+ // Stability: development
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
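+// Example (illustrative sketch, not part of the generated definitions): the
+// enum values above are ordinary attribute.KeyValue constants and can be
+// recorded next to the other ECS attributes. The span variable below is a
+// hypothetical trace.Span.
+//
+//	span.SetAttributes(
+//		AWSECSLaunchtypeFargate,
+//		AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//	)
+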
+// Namespace: azure
+const (
+ // AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
+ // semantic conventions. It represents the unique identifier of the client
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1"
+ AzureClientIDKey = attribute.Key("azure.client.id")
+
+ // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.connection.mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode")
+
+ // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the
+ // "azure.cosmosdb.consistency.level" semantic conventions. It represents the
+ // account or request [consistency level].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong",
+ // "Session"
+ //
+ // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+ AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level")
+
+ // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to
+ // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It
+ // represents the list of regions contacted during operation in the order that
+ // they were contacted. If there is more than one region listed, it indicates
+ // that the operation was performed on multiple regions i.e. cross-regional
+ // call.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "North Central US", "Australia East", "Australia Southeast"
+ // Note: Region name matches the format of `displayName` in [Azure Location API]
+ //
+ // [Azure Location API]: https://learn.microsoft.com/rest/api/resources/subscriptions/list-locations
+ AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions")
+
+ // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents
+ // the number of request units consumed by the operation.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 46.18, 1.0
+ AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge")
+
+ // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+ // request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size")
+
+ // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents
+ // the cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000, 1002
+ AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code")
+
+ // AzureResourceProviderNamespaceKey is the attribute Key conforming to the
+ // "azure.resource_provider.namespace" semantic conventions. It represents the
+ // [Azure Resource Provider Namespace] as recognized by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
+ //
+ // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+ AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace")
+
+ // AzureServiceRequestIDKey is the attribute Key conforming to the
+ // "azure.service.request.id" semantic conventions. It represents the unique
+ // identifier of the service request. It's generated by the Azure service and
+ // returned with the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00000000-0000-0000-0000-000000000000"
+ AzureServiceRequestIDKey = attribute.Key("azure.service.request.id")
+)
+
+// AzureClientID returns an attribute KeyValue conforming to the
+// "azure.client.id" semantic conventions. It represents the unique identifier of
+// the client instance.
+func AzureClientID(val string) attribute.KeyValue {
+ return AzureClientIDKey.String(val)
+}
+
+// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue
+// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic
+// conventions. It represents the list of regions contacted during operation in
+// the order that they were contacted. If there is more than one region listed,
+// it indicates that the operation was performed on multiple regions i.e.
+// cross-regional call.
+func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
+ return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val)
+}
+
+// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming
+// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It
+// represents the number of request units consumed by the operation.
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue {
+ return AzureCosmosDBOperationRequestChargeKey.Float64(val)
+}
+
+// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the
+// "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+// request payload size in bytes.
+func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue {
+ return AzureCosmosDBRequestBodySizeKey.Int(val)
+}
+
+// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to
+// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It
+// represents the cosmos DB sub status code.
+func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
+ return AzureCosmosDBResponseSubStatusCodeKey.Int(val)
+}
+
+// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the
+// "azure.resource_provider.namespace" semantic conventions. It represents the
+// [Azure Resource Provider Namespace] as recognized by the client.
+//
+// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+func AzureResourceProviderNamespace(val string) attribute.KeyValue {
+ return AzureResourceProviderNamespaceKey.String(val)
+}
+
+// AzureServiceRequestID returns an attribute KeyValue conforming to the
+// "azure.service.request.id" semantic conventions. It represents the unique
+// identifier of the service request. It's generated by the Azure service and
+// returned with the response.
+func AzureServiceRequestID(val string) attribute.KeyValue {
+ return AzureServiceRequestIDKey.String(val)
+}
+
+// Enum values for azure.cosmosdb.connection.mode
+var (
+ // Gateway (HTTP) connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct")
+)
+
+// Enum values for azure.cosmosdb.consistency.level
+var (
+ // Strong
+ // Stability: development
+ AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong")
+ // Bounded Staleness
+ // Stability: development
+ AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness")
+ // Session
+ // Stability: development
+ AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session")
+ // Eventual
+ // Stability: development
+ AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual")
+ // Consistent Prefix
+ // Stability: development
+ AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix")
+)
+
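+// Illustrative usage (a sketch, not generated from the semantic conventions): a
+// Cosmos DB client span could combine the constructors and enum values above.
+// The values below are taken from the Examples listed in the attribute docs;
+// span is assumed to be a trace.Span obtained elsewhere:
+//
+//	span.SetAttributes(
+//		AzureCosmosDBConnectionModeGateway,
+//		AzureCosmosDBConsistencyLevelSession,
+//		AzureCosmosDBOperationRequestCharge(46.18),
+//		AzureCosmosDBOperationContactedRegions("North Central US", "Australia East"),
+//	)
+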
+// Namespace: browser
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.brands`).
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the "browser.language"
+ // semantic conventions. It represents the preferred language of the user using
+ // the browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "en", "en-US", "fr", "fr-FR"
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the browser is
+ // running on a mobile device.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
+ // left unset.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the "browser.platform"
+ // semantic conventions. It represents the platform on which the browser is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Windows", "macOS", "Android"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the
+ // [W3C User-Agent Client Hints specification]. Note that some (but not all) of
+ // these values can overlap with values in the
+ // [`os.type` and `os.name` attributes]. However, for consistency, the values in
+ // the `browser.platform` attribute should capture the exact value that the user
+ // agent provides.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform
+ // [`os.type` and `os.name` attributes]: ./os.md
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands"
+// semantic conventions. It represents the array of brand name and version
+// separated by a space.
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred language
+// of the user using the browser.
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile"
+// semantic conventions. It represents a boolean that is true if the browser is
+// running on a mobile device.
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
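+// Illustrative usage (a sketch, using the Examples from the attribute docs):
+// BrowserBrands is variadic and maps to a string-slice attribute, while the
+// other helpers take a single value. With an assumed trace.Span named span,
+// attributes populated from the UA client hints API might look like:
+//
+//	span.SetAttributes(
+//		BrowserBrands(" Not A;Brand 99", "Chromium 99", "Chrome 99"),
+//		BrowserLanguage("en-US"),
+//		BrowserMobile(false),
+//		BrowserPlatform("macOS"),
+//	)
+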
+// Namespace: cassandra
+const (
+ // CassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "cassandra.consistency.level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from [CQL].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+ CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+ // CassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "cassandra.coordinator.dc" semantic conventions. It represents the data
+ // center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: us-west-2
+ CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+ // CassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+ // coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+ CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+ // CassandraPageSizeKey is the attribute Key conforming to the
+ // "cassandra.page.size" semantic conventions. It represents the fetch size used
+ // for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 5000
+ CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+ // CassandraQueryIdempotentKey is the attribute Key conforming to the
+	// "cassandra.query.idempotent" semantic conventions. It represents whether or
+	// not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+ // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+ // "cassandra.speculative_execution.count" semantic conventions. It represents
+ // the number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 2
+ CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+ return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+ return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+ return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+ return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+ // All
+ // Stability: development
+ CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+ // Each Quorum
+ // Stability: development
+ CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+ // Quorum
+ // Stability: development
+ CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+ // Local Quorum
+ // Stability: development
+ CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+ // One
+ // Stability: development
+ CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+ // Two
+ // Stability: development
+ CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+ // Three
+ // Stability: development
+ CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+ // Local One
+ // Stability: development
+ CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+ // Any
+ // Stability: development
+ CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+ // Serial
+ // Stability: development
+ CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+ // Local Serial
+ // Stability: development
+ CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
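+// Illustrative usage (a sketch, values taken from the Examples above): a
+// Cassandra query span could record the coordinator, paging, and consistency
+// information as follows, assuming a trace.Span named span is in scope:
+//
+//	span.SetAttributes(
+//		CassandraConsistencyLevelLocalQuorum,
+//		CassandraCoordinatorDC("us-west-2"),
+//		CassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
+//		CassandraPageSize(5000),
+//		CassandraQueryIdempotent(true),
+//	)
+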
+// Namespace: cicd
+const (
+ // CICDPipelineActionNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+ // action a pipeline run is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BUILD", "RUN", "SYNC"
+ CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+ // CICDPipelineNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.name" semantic conventions. It represents the human readable
+ // name of the pipeline within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Build and Test", "Lint", "Deploy Go Project",
+ // "deploy_to_environment"
+ CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+ // CICDPipelineResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.result" semantic conventions. It represents the result of a
+ // pipeline run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+ // CICDPipelineRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.id" semantic conventions. It represents the unique
+ // identifier of a pipeline run within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "120912"
+ CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+ // CICDPipelineRunStateKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.state" semantic conventions. It represents the state of
+	// the pipeline run; a run goes through these states during its lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pending", "executing", "finalizing"
+ CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+ // CICDPipelineRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+ // the pipeline run, providing the complete address in order to locate and
+ // identify the pipeline run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+ // CICDPipelineTaskNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.name" semantic conventions. It represents the human
+ // readable name of a task within a pipeline. Task here most closely aligns with
+ // a [computing process] in a pipeline. Other terms for tasks include commands,
+ // steps, and procedures.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+ //
+ // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+ CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+ // CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+ // identifier of a task run within a pipeline.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "12097"
+ CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+ // CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.result" semantic conventions. It represents the
+ // result of a task run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+ // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+ // [URL] of the pipeline task run, providing the complete address in order to
+ // locate and identify the pipeline task run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full")
+
+ // CICDPipelineTaskTypeKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.type" semantic conventions. It represents the type of the
+ // task within a pipeline.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "build", "test", "deploy"
+ CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type")
+
+ // CICDSystemComponentKey is the attribute Key conforming to the
+ // "cicd.system.component" semantic conventions. It represents the name of a
+ // component of the CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "controller", "scheduler", "agent"
+ CICDSystemComponentKey = attribute.Key("cicd.system.component")
+
+ // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id"
+ // semantic conventions. It represents the unique identifier of a worker within
+ // a CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "abc123", "10.0.1.2", "controller"
+ CICDWorkerIDKey = attribute.Key("cicd.worker.id")
+
+ // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name"
+ // semantic conventions. It represents the name of a worker within a CICD
+ // system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "agent-abc", "controller", "Ubuntu LTS"
+ CICDWorkerNameKey = attribute.Key("cicd.worker.name")
+
+ // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state"
+ // semantic conventions. It represents the state of a CICD worker / agent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle", "busy", "down"
+ CICDWorkerStateKey = attribute.Key("cicd.worker.state")
+
+ // CICDWorkerURLFullKey is the attribute Key conforming to the
+ // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+ // worker, providing the complete address in order to locate and identify the
+ // worker.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://cicd.example.org/worker/abc123"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full")
+)
+
+// CICDPipelineName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.name" semantic conventions. It represents the human readable
+// name of the pipeline within a CI/CD system.
+func CICDPipelineName(val string) attribute.KeyValue {
+ return CICDPipelineNameKey.String(val)
+}
+
+// CICDPipelineRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+// identifier of a pipeline run within a CI/CD system.
+func CICDPipelineRunID(val string) attribute.KeyValue {
+ return CICDPipelineRunIDKey.String(val)
+}
+
+// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+// the pipeline run, providing the complete address in order to locate and
+// identify the pipeline run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineRunURLFullKey.String(val)
+}
+
+// CICDPipelineTaskName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.name" semantic conventions. It represents the human
+// readable name of a task within a pipeline. Task here most closely aligns with
+// a [computing process] in a pipeline. Other terms for tasks include commands,
+// steps, and procedures.
+//
+// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+func CICDPipelineTaskName(val string) attribute.KeyValue {
+ return CICDPipelineTaskNameKey.String(val)
+}
+
+// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+// identifier of a task run within a pipeline.
+func CICDPipelineTaskRunID(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunIDKey.String(val)
+}
+
+// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+// [URL] of the pipeline task run, providing the complete address in order to
+// locate and identify the pipeline task run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunURLFullKey.String(val)
+}
+
+// CICDSystemComponent returns an attribute KeyValue conforming to the
+// "cicd.system.component" semantic conventions. It represents the name of a
+// component of the CICD system.
+func CICDSystemComponent(val string) attribute.KeyValue {
+ return CICDSystemComponentKey.String(val)
+}
+
+// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id"
+// semantic conventions. It represents the unique identifier of a worker within a
+// CICD system.
+func CICDWorkerID(val string) attribute.KeyValue {
+ return CICDWorkerIDKey.String(val)
+}
+
+// CICDWorkerName returns an attribute KeyValue conforming to the
+// "cicd.worker.name" semantic conventions. It represents the name of a worker
+// within a CICD system.
+func CICDWorkerName(val string) attribute.KeyValue {
+ return CICDWorkerNameKey.String(val)
+}
+
+// CICDWorkerURLFull returns an attribute KeyValue conforming to the
+// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+// worker, providing the complete address in order to locate and identify the
+// worker.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDWorkerURLFull(val string) attribute.KeyValue {
+ return CICDWorkerURLFullKey.String(val)
+}
+
+// Enum values for cicd.pipeline.action.name
+var (
+ // The pipeline run is executing a build.
+ // Stability: development
+ CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD")
+ // The pipeline run is executing.
+ // Stability: development
+ CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN")
+ // The pipeline run is executing a sync.
+ // Stability: development
+ CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC")
+)
+
+// Enum values for cicd.pipeline.result
+var (
+ // The pipeline run finished successfully.
+ // Stability: development
+ CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
+ // The pipeline run did not finish successfully, eg. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the pipeline run.
+ // Stability: development
+ CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+ // The pipeline run failed due to an error in the CICD system, eg. due to the
+ // worker being killed.
+ // Stability: development
+ CICDPipelineResultError = CICDPipelineResultKey.String("error")
+ // A timeout caused the pipeline run to be interrupted.
+ // Stability: development
+ CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+ // The pipeline run was cancelled, eg. by a user manually cancelling the
+ // pipeline run.
+ // Stability: development
+ CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+ // The pipeline run was skipped, eg. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+ // The run pending state spans from the event triggering the pipeline run until
+ // the execution of the run starts (eg. time spent in a queue, provisioning
+ // agents, creating run resources).
+ //
+ // Stability: development
+ CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+ // The executing state spans the execution of any run tasks (eg. build, test).
+ // Stability: development
+ CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+ // The finalizing state spans from when the run has finished executing (eg.
+ // cleanup of run resources).
+ // Stability: development
+ CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+ // The task run finished successfully.
+ // Stability: development
+ CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+ // The task run did not finish successfully, eg. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+ // The task run failed due to an error in the CICD system, eg. due to the worker
+ // being killed.
+ // Stability: development
+ CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+ // A timeout caused the task run to be interrupted.
+ // Stability: development
+ CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+ // The task run was cancelled, eg. by a user manually cancelling the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+ // The task run was skipped, eg. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+ // build
+ // Stability: development
+ CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+ // test
+ // Stability: development
+ CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+ // deploy
+ // Stability: development
+ CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
+
+// Enum values for cicd.worker.state
+var (
+ // The worker is not performing work for the CICD system. It is available to the
+ // CICD system to perform work on (online / idle).
+ // Stability: development
+ CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+ // The worker is performing work for the CICD system.
+ // Stability: development
+ CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+ // The worker is not available to the CICD system (disconnected / down).
+ // Stability: development
+ CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
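+// Illustrative usage (a sketch, not generated from the semantic conventions):
+// a span describing a pipeline task run might carry both the free-form helpers
+// and the enum values, assuming a trace.Span named span is in scope and using
+// example values from the attribute docs above:
+//
+//	span.SetAttributes(
+//		CICDPipelineName("Build and Test"),
+//		CICDPipelineRunID("120912"),
+//		CICDPipelineTaskName("go-test"),
+//		CICDPipelineTaskTypeTest,
+//		CICDPipelineTaskRunResultSuccess,
+//	)
+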
+// Namespace: client
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.address` SHOULD represent the client address behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port" semantic
+ // conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.port` SHOULD represent the client port behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
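+// Illustrative usage (a sketch): on a server span these two attributes describe
+// the calling peer, using example values from the docs above and an assumed
+// trace.Span named span:
+//
+//	span.SetAttributes(
+//		ClientAddress("10.1.2.80"),
+//		ClientPort(65123),
+//	)
+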
+// Namespace: cloud
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+ // semantic conventions. It represents the cloud account ID the resource is
+ // assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "111111111111", "opentelemetry"
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-east-1c"
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+ // conventions. It represents the geographical region within a cloud provider.
+ // When associated with a resource, this attribute specifies the region where
+ // the resource operates. When calling services or APIs deployed on a cloud,
+ // this attribute identifies the region where the called destination is
+ // deployed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1", "us-east-1"
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions], [AWS regions], [Azure regions],
+ // [Google Cloud regions], or [Tencent Cloud regions].
+ //
+ // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+ // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+ // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+ // [Google Cloud regions]: https://cloud.google.com/about/locations
+ // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+ // semantic conventions. It represents the cloud provider-specific native
+ // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+ // [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+ // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+	// "/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/"
+ // Note: On some cloud providers, it may not be possible to determine the full
+ // ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud provider.
+ // The following well-known definitions MUST be used if you set this attribute
+ // and they apply:
+ //
+ // - **AWS Lambda:** The function [ARN].
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix]
+ // with the resolved function version, as the same runtime instance may be
+ // invocable with
+ // multiple different aliases.
+ // - **GCP:** The [URI of the resource]
+ // - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+ // *not* the function app, having the form
+ //
+	// `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+ // This means that a span attribute MUST be used, as an Azure function app
+ // can host multiple functions that would usually share
+ // a TracerProvider.
+ //
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// Enum values for cloud.platform
+var (
+ // Akamai Cloud Compute
+ // Stability: development
+ CloudPlatformAkamaiCloudCompute = CloudPlatformKey.String("akamai_cloud.compute")
+ // Alibaba Cloud Elastic Compute Service
+ // Stability: development
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ // Stability: development
+ CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ // Stability: development
+ CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ // Stability: development
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ // Stability: development
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ // Stability: development
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ // Stability: development
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ // Stability: development
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ // Stability: development
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ // Stability: development
+ CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ // Stability: development
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm")
+ // Azure Container Apps
+ // Stability: development
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps")
+ // Azure Container Instances
+ // Stability: development
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances")
+ // Azure Kubernetes Service
+ // Stability: development
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks")
+ // Azure Functions
+ // Stability: development
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions")
+ // Azure App Service
+ // Stability: development
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service")
+ // Azure Red Hat OpenShift
+ // Stability: development
+ CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift")
+ // Google Vertex AI Agent Engine
+ // Stability: development
+ CloudPlatformGCPAgentEngine = CloudPlatformKey.String("gcp.agent_engine")
+ // Google Bare Metal Solution (BMS)
+ // Stability: development
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ // Stability: development
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ // Stability: development
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ // Stability: development
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ // Stability: development
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ // Stability: development
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ // Stability: development
+ CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+ // Server on Hetzner Cloud
+ // Stability: development
+ CloudPlatformHetznerCloudServer = CloudPlatformKey.String("hetzner.cloud_server")
+ // Red Hat OpenShift on IBM Cloud
+ // Stability: development
+ CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Compute on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+ // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ // Stability: development
+ CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ // Stability: development
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ // Stability: development
+ CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+ // Vultr Cloud Compute
+ // Stability: development
+ CloudPlatformVultrCloudCompute = CloudPlatformKey.String("vultr.cloud_compute")
+)
+
+// Enum values for cloud.provider
+var (
+ // Akamai Cloud
+ // Stability: development
+ CloudProviderAkamaiCloud = CloudProviderKey.String("akamai_cloud")
+ // Alibaba Cloud
+ // Stability: development
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ // Stability: development
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // Hetzner
+ // Stability: development
+ CloudProviderHetzner = CloudProviderKey.String("hetzner")
+ // IBM Cloud
+ // Stability: development
+ CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+ // Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+ // Tencent Cloud
+ // Stability: development
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+ // Vultr
+ // Stability: development
+ CloudProviderVultr = CloudProviderKey.String("vultr")
+)
+
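+// Illustrative usage (a sketch, not generated from the semantic conventions):
+// cloud attributes typically describe the resource a service runs on rather than
+// an individual span. Assuming the go.opentelemetry.io/otel/sdk/resource package
+// is imported and a package-level SchemaURL constant is defined elsewhere in
+// this package, a resource for an EC2 instance might be built as:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		CloudProviderAWS,
+//		CloudPlatformAWSEC2,
+//		CloudRegion("us-east-1"),
+//		CloudAvailabilityZone("us-east-1c"),
+//		CloudAccountID("111111111111"),
+//	)
+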
+// Namespace: cloudevents
+const (
+ // CloudEventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+	// which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+ //
+ // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+ CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudEventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the [source],
+	// which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+ // "my-service"
+ //
+ // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+ CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents specification] which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ //
+ // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+ CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudEventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the [subject]
+ // of the event in the context of the event producer (identified by source).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: mynewfile.jpg
+ //
+ // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+ CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudEventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type], which contains a value describing the type of event related
+	// to the originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+ //
+ // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+ CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id]
+// uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+ return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source]
+// identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+ return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+ return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the [event_type]
+// contains a value describing the type of event related to the originating
+// occurrence.
+//
+// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+func CloudEventsEventType(val string) attribute.KeyValue {
+ return CloudEventsEventTypeKey.String(val)
+}
+
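+// Illustrative usage (a sketch, values taken from the Examples above): a span
+// created while processing a CloudEvent might record the event envelope fields,
+// assuming a trace.Span named span is in scope:
+//
+//	span.SetAttributes(
+//		CloudEventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudEventsEventSource("https://github.com/cloudevents"),
+//		CloudEventsEventSpecVersion("1.0"),
+//		CloudEventsEventType("com.example.object.deleted.v2"),
+//	)
+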
+// Namespace: cloudfoundry
+const (
+ // CloudFoundryAppIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_id`. This is the same value as
+ // reported by `cf app --guid`.
+ CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
+
+ // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+ // of the application instance. 0 when just one instance is active.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0", "1"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the application instance index for applications
+ // deployed on the runtime.
+ //
+ // Application instrumentation should use the value from environment
+ // variable `CF_INSTANCE_INDEX`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
+
+ // CloudFoundryAppNameKey is the attribute Key conforming to the
+ // "cloudfoundry.app.name" semantic conventions. It represents the name of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-app-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_name`. This is the same value
+ // as reported by `cf apps`.
+ CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
+
+ // CloudFoundryOrgIDKey is the attribute Key conforming to the
+ // "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+ // CloudFoundry org the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_id`. This is the same value as
+ // reported by `cf org --guid`.
+ CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id")
+
+ // CloudFoundryOrgNameKey is the attribute Key conforming to the
+ // "cloudfoundry.org.name" semantic conventions. It represents the name of the
+ // CloudFoundry organization the app is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_name`. This is the same value as
+ // reported by `cf orgs`.
+ CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name")
+
+ // CloudFoundryProcessIDKey is the attribute Key conforming to the
+ // "cloudfoundry.process.id" semantic conventions. It represents the UID
+ // identifying the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to
+ // `VCAP_APPLICATION.app_id` for applications deployed to the runtime.
+ // For system components, this could be the actual PID.
+ CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id")
+
+ // CloudFoundryProcessTypeKey is the attribute Key conforming to the
+ // "cloudfoundry.process.type" semantic conventions. It represents the type of
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "web"
+ // Note: CloudFoundry applications can consist of multiple jobs. Usually the
+ // main process will be of type `web`. There can be additional background
+ // tasks or side-cars with different process types.
+ CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type")
+
+ // CloudFoundrySpaceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_id`. This is the same value as
+ // reported by `cf space --guid`.
+ CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id")
+
+ // CloudFoundrySpaceNameKey is the attribute Key conforming to the
+ // "cloudfoundry.space.name" semantic conventions. It represents the name of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-space-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_name`. This is the same value as
+ // reported by `cf spaces`.
+ CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name")
+
+ // CloudFoundrySystemIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.id" semantic conventions. It represents a guid or
+ // another name describing the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cf/gorouter"
+ // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the component name, e.g. "gorouter", for
+ // CloudFoundry components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.id` should be set to
+ // `spec.deployment/spec.name`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id")
+
+ // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+ // describing the concrete instance of the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope]. It is used for logs and metrics emitted by
+	// CloudFoundry. It is supposed to contain the vm id for CloudFoundry
+	// components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.instance.id` should be set to `spec.id`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id")
+)
+
+// CloudFoundryAppID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+// application.
+func CloudFoundryAppID(val string) attribute.KeyValue {
+ return CloudFoundryAppIDKey.String(val)
+}
+
+// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+// of the application instance. 0 when just one instance is active.
+func CloudFoundryAppInstanceID(val string) attribute.KeyValue {
+ return CloudFoundryAppInstanceIDKey.String(val)
+}
+
+// CloudFoundryAppName returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+// application.
+func CloudFoundryAppName(val string) attribute.KeyValue {
+ return CloudFoundryAppNameKey.String(val)
+}
+
+// CloudFoundryOrgID returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+// CloudFoundry org the application is running in.
+func CloudFoundryOrgID(val string) attribute.KeyValue {
+ return CloudFoundryOrgIDKey.String(val)
+}
+
+// CloudFoundryOrgName returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+// CloudFoundry organization the app is running in.
+func CloudFoundryOrgName(val string) attribute.KeyValue {
+ return CloudFoundryOrgNameKey.String(val)
+}
+
+// CloudFoundryProcessID returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.id" semantic conventions. It represents the UID
+// identifying the process.
+func CloudFoundryProcessID(val string) attribute.KeyValue {
+ return CloudFoundryProcessIDKey.String(val)
+}
+
+// CloudFoundryProcessType returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.type" semantic conventions. It represents the type of
+// process.
+func CloudFoundryProcessType(val string) attribute.KeyValue {
+ return CloudFoundryProcessTypeKey.String(val)
+}
+
+// CloudFoundrySpaceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceID(val string) attribute.KeyValue {
+ return CloudFoundrySpaceIDKey.String(val)
+}
+
+// CloudFoundrySpaceName returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+ return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+ return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+ return CloudFoundrySystemInstanceIDKey.String(val)
+}
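+
+// Illustrative sketch (not part of the generated conventions): one way the
+// CloudFoundry helpers above could be combined into a resource attribute set.
+// The literal values are hypothetical placeholders; real instrumentation would
+// read them from `VCAP_APPLICATION` and `CF_INSTANCE_INDEX` as described in the
+// individual key documentation.
+func exampleCloudFoundryResourceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CloudFoundryAppName("my-app-name"),     // VCAP_APPLICATION.application_name
+		CloudFoundryAppInstanceID("0"),         // CF_INSTANCE_INDEX
+		CloudFoundryOrgName("my-org-name"),     // VCAP_APPLICATION.org_name
+		CloudFoundrySpaceName("my-space-name"), // VCAP_APPLICATION.space_name
+		CloudFoundryProcessType("web"),         // main process type
+	}
+}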
+
+// Namespace: code
+const (
+ // CodeColumnNumberKey is the attribute Key conforming to the
+ // "code.column.number" semantic conventions. It represents the column number in
+ // `code.file.path` best representing the operation. It SHOULD point within the
+ // code unit named in `code.function.name`. This attribute MUST NOT be used on
+ // the Profile signal since the data is already captured in 'message Line'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeColumnNumberKey = attribute.Key("code.column.number")
+
+ // CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+ // semantic conventions. It represents the source code file name that identifies
+ // the code unit as uniquely as possible (preferably an absolute file path).
+ // This attribute MUST NOT be used on the Profile signal since the data is
+ // already captured in 'message Function'. This constraint is imposed to prevent
+ // redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: /usr/local/MyApplication/content_root/app/index.php
+ CodeFilePathKey = attribute.Key("code.file.path")
+
+ // CodeFunctionNameKey is the attribute Key conforming to the
+ // "code.function.name" semantic conventions. It represents the method or
+ // function fully-qualified name without arguments. The value should fit the
+ // natural representation of the language runtime, which is also likely the same
+ // used within `code.stacktrace` attribute value. This attribute MUST NOT be
+ // used on the Profile signal since the data is already captured in 'message
+ // Function'. This constraint is imposed to prevent redundancy and maintain data
+ // integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "com.example.MyHttpService.serveRequest",
+ // "GuzzleHttp\Client::transfer", "fopen"
+	// Note: Values and format depend on each language runtime, thus it is
+ // impossible to provide an exhaustive list of examples.
+ // The values are usually the same (or prefixes of) the ones found in native
+ // stack trace representation stored in
+ // `code.stacktrace` without information on arguments.
+ //
+ // Examples:
+ //
+ // - Java method: `com.example.MyHttpService.serveRequest`
+ // - Java anonymous class method: `com.mycompany.Main$1.myMethod`
+ // - Java lambda method:
+ // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod`
+ // - PHP function: `GuzzleHttp\Client::transfer`
+ // - Go function: `github.com/my/repo/pkg.foo.func5`
+ // - Elixir: `OpenTelemetry.Ctx.new`
+ // - Erlang: `opentelemetry_ctx:new`
+ // - Rust: `playground::my_module::my_cool_func`
+ // - C function: `fopen`
+ CodeFunctionNameKey = attribute.Key("code.function.name")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.line.number"
+ // semantic conventions. It represents the line number in `code.file.path` best
+ // representing the operation. It SHOULD point within the code unit named in
+ // `code.function.name`. This attribute MUST NOT be used on the Profile signal
+ // since the data is already captured in 'message Line'. This constraint is
+ // imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeLineNumberKey = attribute.Key("code.line.number")
+
+ // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace"
+ // semantic conventions. It represents a stacktrace as a string in the natural
+ // representation for the language runtime. The representation is identical to
+ // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile
+ // signal since the data is already captured in 'message Location'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ //
+ // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumnNumber returns an attribute KeyValue conforming to the
+// "code.column.number" semantic conventions. It represents the column number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeColumnNumber(val int) attribute.KeyValue {
+ return CodeColumnNumberKey.Int(val)
+}
+
+// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path"
+// semantic conventions. It represents the source code file name that identifies
+// the code unit as uniquely as possible (preferably an absolute file path). This
+// attribute MUST NOT be used on the Profile signal since the data is already
+// captured in 'message Function'. This constraint is imposed to prevent
+// redundancy and maintain data integrity.
+func CodeFilePath(val string) attribute.KeyValue {
+ return CodeFilePathKey.String(val)
+}
+
+// CodeFunctionName returns an attribute KeyValue conforming to the
+// "code.function.name" semantic conventions. It represents the method or
+// function fully-qualified name without arguments. The value should fit the
+// natural representation of the language runtime, which is also likely the same
+// used within `code.stacktrace` attribute value. This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
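+
+// Illustrative sketch (not part of the generated conventions): source-location
+// attributes built with the code.* helpers above, e.g. for annotating a span or
+// log record. The function name, file path, and line/column are hypothetical
+// placeholders; real instrumentation would typically derive them from the
+// runtime (for example via runtime.Caller in Go).
+func exampleCodeLocationAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CodeFunctionName("github.com/my/repo/pkg.Handler.ServeHTTP"),
+		CodeFilePath("/srv/app/handler.go"),
+		CodeLineNumber(42),
+		CodeColumnNumber(16),
+	}
+}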
+
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full command
+ // run by the container as a single string representing the full command.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+	// Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+	// K8s defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+	// Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+	// Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+	// the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+	// Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeDescriptionKey is the attribute Key conforming to the
+ // "container.runtime.description" semantic conventions. It represents a
+	// description about the runtime which could include, for example, details about
+ // the CRI/API version being used or other customisations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker://19.3.1 - CRI: 1.22.0"
+ ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description")
+
+ // ContainerRuntimeNameKey is the attribute Key conforming to the
+ // "container.runtime.name" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeNameKey = attribute.Key("container.runtime.name")
+
+ // ContainerRuntimeVersionKey is the attribute Key conforming to the
+ // "container.runtime.version" semantic conventions. It represents the version
+ // of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0.0
+ ContainerRuntimeVersionKey = attribute.Key("container.runtime.version")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/reference/api/engine/version/v1.52/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("container.label."+key, val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntimeDescription returns an attribute KeyValue conforming to the
+// "container.runtime.description" semantic conventions. It represents a
+// description about the runtime which could include, for example, details about
+// the CRI/API version being used or other customisations.
+func ContainerRuntimeDescription(val string) attribute.KeyValue {
+ return ContainerRuntimeDescriptionKey.String(val)
+}
+
+// ContainerRuntimeName returns an attribute KeyValue conforming to the
+// "container.runtime.name" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntimeName(val string) attribute.KeyValue {
+ return ContainerRuntimeNameKey.String(val)
+}
+
+// ContainerRuntimeVersion returns an attribute KeyValue conforming to the
+// "container.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ContainerRuntimeVersion(val string) attribute.KeyValue {
+ return ContainerRuntimeVersionKey.String(val)
+}
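+
+// Illustrative sketch (not part of the generated conventions): container
+// resource attributes built with the helpers above, including the templated
+// ContainerLabel helper which produces `container.label.<key>` attributes. All
+// values are hypothetical placeholders.
+func exampleContainerResourceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ContainerID("a3bf90e006b2"),
+		ContainerName("opentelemetry-autoconf"),
+		ContainerImageName("gcr.io/opentelemetry/operator"),
+		ContainerImageTags("v1.27.1"),
+		ContainerLabel("app", "nginx"), // emitted as container.label.app
+	}
+}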
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ CPULogicalNumberKey = attribute.Key("cpu.logical_number")
+
+ // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic
+ // conventions. It represents the mode of the CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "user", "system"
+ CPUModeKey = attribute.Key("cpu.mode")
+)
+
+// CPULogicalNumber returns an attribute KeyValue conforming to the
+// "cpu.logical_number" semantic conventions. It represents the logical CPU
+// number [0..n-1].
+func CPULogicalNumber(val int) attribute.KeyValue {
+ return CPULogicalNumberKey.Int(val)
+}
+
+// Enum values for cpu.mode
+var (
+ // User
+ // Stability: development
+ CPUModeUser = CPUModeKey.String("user")
+ // System
+ // Stability: development
+ CPUModeSystem = CPUModeKey.String("system")
+ // Nice
+ // Stability: development
+ CPUModeNice = CPUModeKey.String("nice")
+ // Idle
+ // Stability: development
+ CPUModeIdle = CPUModeKey.String("idle")
+ // IO Wait
+ // Stability: development
+ CPUModeIOWait = CPUModeKey.String("iowait")
+ // Interrupt
+ // Stability: development
+ CPUModeInterrupt = CPUModeKey.String("interrupt")
+ // Steal
+ // Stability: development
+ CPUModeSteal = CPUModeKey.String("steal")
+ // Kernel
+ // Stability: development
+ CPUModeKernel = CPUModeKey.String("kernel")
+)
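+
+// Illustrative sketch (not part of the generated conventions): the attribute
+// set a per-CPU time metric data point might carry, pairing the logical-number
+// helper with one of the cpu.mode enum values above. The CPU index is a
+// hypothetical placeholder.
+func exampleCPUTimeAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CPULogicalNumber(1),
+		CPUModeUser, // cpu.mode="user"; enum values are ready-made KeyValues
+	}
+}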
+
+// Namespace: db
+const (
+ // DBClientConnectionPoolNameKey is the attribute Key conforming to the
+ // "db.client.connection.pool.name" semantic conventions. It represents the name
+ // of the connection pool; unique within the instrumented application. In case
+ // the connection pool implementation doesn't provide a name, instrumentation
+ // SHOULD use a combination of parameters that would make the name unique, for
+ // example, combining attributes `server.address`, `server.port`, and
+ // `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+ // Instrumentations that generate connection pool name following different
+ // patterns SHOULD document it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myDataSource"
+ DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name")
+
+ // DBClientConnectionStateKey is the attribute Key conforming to the
+ // "db.client.connection.state" semantic conventions. It represents the state of
+ // a connection in the pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle"
+ DBClientConnectionStateKey = attribute.Key("db.client.connection.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "public.users", "customers"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The collection name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple collections
+ // in non-batch operations.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // collection name then that collection name SHOULD be used.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic
+ // conventions. It represents the name of the database, fully qualified within
+ // the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "customers", "test.users"
+ // Note: If a database system has multiple namespace components, they SHOULD be
+ // concatenated from the most general to the most specific namespace component,
+ // using `|` as a separator between the components. Any missing components (and
+ // their associated separators) SHOULD be omitted.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application without
+ // attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationBatchSizeKey is the attribute Key conforming to the
+ // "db.operation.batch.size" semantic conventions. It represents the number of
+ // queries included in a batch operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 2, 3, 4
+ // Note: Operations are only considered batches when they contain two or more
+ // operations, and so `db.operation.batch.size` SHOULD never be `1`.
+ DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size")
+
+ // DBOperationNameKey is the attribute Key conforming to the "db.operation.name"
+ // semantic conventions. It represents the name of the operation or command
+ // being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "findAndModify", "HMSET", "SELECT"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The operation name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple operations
+ // in non-batch operations.
+ //
+ // If spaces can occur in the operation name, multiple consecutive spaces
+ // SHOULD be normalized to a single space.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // operation name
+ // then that operation name SHOULD be used prepended by `BATCH `,
+ // otherwise `db.operation.name` SHOULD be `BATCH` or some other database
+ // system specific term if more applicable.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary"
+ // semantic conventions. It represents the low cardinality summary of a database
+ // query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get
+ // user by id"
+ // Note: The query summary describes a class of database queries and is useful
+ // as a grouping key, especially when analyzing telemetry for database
+ // calls involving complex queries.
+ //
+ // Summary may be available to the instrumentation through
+ // instrumentation hooks or other means. If it is not available,
+ // instrumentations
+ // that support query parsing SHOULD generate a summary following
+ // [Generating query summary]
+ // section.
+ //
+ // [Generating query summary]: /docs/db/database-spans.md#generating-a-summary-of-the-query
+ DBQuerySummaryKey = attribute.Key("db.query.summary")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?"
+ // Note: For sanitization see [Sanitization of `db.query.text`].
+ // For batch operations, if the individual operations are known to have the same
+ // query text then that query text SHOULD be used, otherwise all of the
+ // individual query texts SHOULD be concatenated with separator `; ` or some
+ // other database system specific separator if more applicable.
+ // Parameterized query text SHOULD NOT be sanitized. Even though parameterized
+ // query text can potentially have sensitive data, by using a parameterized
+ // query the user is giving a strong signal that any sensitive data will be
+ // passed as parameter values, and the benefit to observability of capturing the
+ // static part of the query text by default outweighs the risk.
+ //
+ // [Sanitization of `db.query.text`]: /docs/db/database-spans.md#sanitization-of-dbquerytext
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBResponseReturnedRowsKey is the attribute Key conforming to the
+ // "db.response.returned_rows" semantic conventions. It represents the number of
+ // rows returned by the operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10, 30, 1000
+ DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows")
+
+ // DBResponseStatusCodeKey is the attribute Key conforming to the
+ // "db.response.status_code" semantic conventions. It represents the database
+ // response status code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "102", "ORA-17002", "08P01", "404"
+ // Note: The status code returned by the database. Usually it represents an
+ // error code, but may also represent partial success, warning, or differentiate
+ // between various types of successful outcomes.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.response.status_code` means in the context of that system.
+ DBResponseStatusCodeKey = attribute.Key("db.response.status_code")
+
+ // DBStoredProcedureNameKey is the attribute Key conforming to the
+ // "db.stored_procedure.name" semantic conventions. It represents the name of a
+ // stored procedure within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GetCustomer"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // stored procedure name then that stored procedure name SHOULD be used.
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name")
+
+ // DBSystemNameKey is the attribute Key conforming to the "db.system.name"
+ // semantic conventions. It represents the database management system (DBMS)
+ // product as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ // Note: The actual DBMS may differ from the one identified by the client. For
+ // example, when using PostgreSQL client libraries to connect to a CockroachDB,
+ // the `db.system.name` is set to `postgresql` based on the instrumentation's
+ // best knowledge.
+ DBSystemNameKey = attribute.Key("db.system.name")
+)
+
+// DBClientConnectionPoolName returns an attribute KeyValue conforming to the
+// "db.client.connection.pool.name" semantic conventions. It represents the name
+// of the connection pool; unique within the instrumented application. In case
+// the connection pool implementation doesn't provide a name, instrumentation
+// SHOULD use a combination of parameters that would make the name unique, for
+// example, combining attributes `server.address`, `server.port`, and
+// `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+// Instrumentations that generate connection pool name following different
+// patterns SHOULD document it.
+func DBClientConnectionPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the "db.namespace"
+// semantic conventions. It represents the name of the database, fully qualified
+// within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationBatchSize returns an attribute KeyValue conforming to the
+// "db.operation.batch.size" semantic conventions. It represents the number of
+// queries included in a batch operation.
+func DBOperationBatchSize(val int) attribute.KeyValue {
+ return DBOperationBatchSizeKey.Int(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `<key>` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `<key>` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.query.parameter."+key, val)
+}
+
+// DBQuerySummary returns an attribute KeyValue conforming to the
+// "db.query.summary" semantic conventions. It represents the low cardinality
+// summary of a database query.
+func DBQuerySummary(val string) attribute.KeyValue {
+ return DBQuerySummaryKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the "db.query.text"
+// semantic conventions. It represents the database query being executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
+// DBResponseReturnedRows returns an attribute KeyValue conforming to the
+// "db.response.returned_rows" semantic conventions. It represents the number of
+// rows returned by the operation.
+func DBResponseReturnedRows(val int) attribute.KeyValue {
+ return DBResponseReturnedRowsKey.Int(val)
+}
+
+// DBResponseStatusCode returns an attribute KeyValue conforming to the
+// "db.response.status_code" semantic conventions. It represents the database
+// response status code.
+func DBResponseStatusCode(val string) attribute.KeyValue {
+ return DBResponseStatusCodeKey.String(val)
+}
+
+// DBStoredProcedureName returns an attribute KeyValue conforming to the
+// "db.stored_procedure.name" semantic conventions. It represents the name of a
+// stored procedure within the database.
+func DBStoredProcedureName(val string) attribute.KeyValue {
+ return DBStoredProcedureNameKey.String(val)
+}
+
+// Enum values for db.client.connection.state
+var (
+ // idle
+ // Stability: development
+ DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle")
+ // used
+ // Stability: development
+ DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used")
+)
+
+// Enum values for db.system.name
+var (
+ // Some other SQL database. Fallback only.
+ // Stability: development
+ DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql")
+ // [Adabas (Adaptable Database System)]
+ // Stability: development
+ //
+ // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas
+ DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas")
+ // [Actian Ingres]
+ // Stability: development
+ //
+ // [Actian Ingres]: https://www.actian.com/databases/ingres/
+ DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres")
+ // [Amazon DynamoDB]
+ // Stability: development
+ //
+ // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/
+ DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb")
+ // [Amazon Redshift]
+ // Stability: development
+ //
+ // [Amazon Redshift]: https://aws.amazon.com/redshift/
+ DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift")
+ // [Azure Cosmos DB]
+ // Stability: development
+ //
+ // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db
+ DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb")
+ // [InterSystems Caché]
+ // Stability: development
+ //
+ // [InterSystems Caché]: https://www.intersystems.com/products/cache/
+ DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache")
+ // [Apache Cassandra]
+ // Stability: development
+ //
+ // [Apache Cassandra]: https://cassandra.apache.org/
+ DBSystemNameCassandra = DBSystemNameKey.String("cassandra")
+ // [ClickHouse]
+ // Stability: development
+ //
+ // [ClickHouse]: https://clickhouse.com/
+ DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse")
+ // [CockroachDB]
+ // Stability: development
+ //
+ // [CockroachDB]: https://www.cockroachlabs.com/
+ DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb")
+ // [Couchbase]
+ // Stability: development
+ //
+ // [Couchbase]: https://www.couchbase.com/
+ DBSystemNameCouchbase = DBSystemNameKey.String("couchbase")
+ // [Apache CouchDB]
+ // Stability: development
+ //
+ // [Apache CouchDB]: https://couchdb.apache.org/
+ DBSystemNameCouchDB = DBSystemNameKey.String("couchdb")
+ // [Apache Derby]
+ // Stability: development
+ //
+ // [Apache Derby]: https://db.apache.org/derby/
+ DBSystemNameDerby = DBSystemNameKey.String("derby")
+ // [Elasticsearch]
+ // Stability: development
+ //
+ // [Elasticsearch]: https://www.elastic.co/elasticsearch
+ DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch")
+ // [Firebird]
+ // Stability: development
+ //
+ // [Firebird]: https://www.firebirdsql.org/
+ DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql")
+ // [Google Cloud Spanner]
+ // Stability: development
+ //
+ // [Google Cloud Spanner]: https://cloud.google.com/spanner
+ DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner")
+ // [Apache Geode]
+ // Stability: development
+ //
+ // [Apache Geode]: https://geode.apache.org/
+ DBSystemNameGeode = DBSystemNameKey.String("geode")
+ // [H2 Database]
+ // Stability: development
+ //
+ // [H2 Database]: https://h2database.com/
+ DBSystemNameH2database = DBSystemNameKey.String("h2database")
+ // [Apache HBase]
+ // Stability: development
+ //
+ // [Apache HBase]: https://hbase.apache.org/
+ DBSystemNameHBase = DBSystemNameKey.String("hbase")
+ // [Apache Hive]
+ // Stability: development
+ //
+ // [Apache Hive]: https://hive.apache.org/
+ DBSystemNameHive = DBSystemNameKey.String("hive")
+ // [HyperSQL Database]
+ // Stability: development
+ //
+ // [HyperSQL Database]: https://hsqldb.org/
+ DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb")
+ // [IBM Db2]
+ // Stability: development
+ //
+ // [IBM Db2]: https://www.ibm.com/db2
+ DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2")
+ // [IBM Informix]
+ // Stability: development
+ //
+ // [IBM Informix]: https://www.ibm.com/products/informix
+ DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix")
+ // [IBM Netezza]
+ // Stability: development
+ //
+ // [IBM Netezza]: https://www.ibm.com/products/netezza
+ DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza")
+ // [InfluxDB]
+ // Stability: development
+ //
+ // [InfluxDB]: https://www.influxdata.com/
+ DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb")
+ // [Instant]
+ // Stability: development
+ //
+ // [Instant]: https://www.instantdb.com/
+ DBSystemNameInstantDB = DBSystemNameKey.String("instantdb")
+ // [MariaDB]
+ // Stability: stable
+ //
+ // [MariaDB]: https://mariadb.org/
+ DBSystemNameMariaDB = DBSystemNameKey.String("mariadb")
+ // [Memcached]
+ // Stability: development
+ //
+ // [Memcached]: https://memcached.org/
+ DBSystemNameMemcached = DBSystemNameKey.String("memcached")
+ // [MongoDB]
+ // Stability: development
+ //
+ // [MongoDB]: https://www.mongodb.com/
+ DBSystemNameMongoDB = DBSystemNameKey.String("mongodb")
+ // [Microsoft SQL Server]
+ // Stability: stable
+ //
+ // [Microsoft SQL Server]: https://www.microsoft.com/sql-server
+ DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server")
+ // [MySQL]
+ // Stability: stable
+ //
+ // [MySQL]: https://www.mysql.com/
+ DBSystemNameMySQL = DBSystemNameKey.String("mysql")
+ // [Neo4j]
+ // Stability: development
+ //
+ // [Neo4j]: https://neo4j.com/
+ DBSystemNameNeo4j = DBSystemNameKey.String("neo4j")
+ // [OpenSearch]
+ // Stability: development
+ //
+ // [OpenSearch]: https://opensearch.org/
+ DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch")
+ // [Oracle Database]
+ // Stability: development
+ //
+ // [Oracle Database]: https://www.oracle.com/database/
+ DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db")
+ // [PostgreSQL]
+ // Stability: stable
+ //
+ // [PostgreSQL]: https://www.postgresql.org/
+ DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql")
+ // [Redis]
+ // Stability: development
+ //
+ // [Redis]: https://redis.io/
+ DBSystemNameRedis = DBSystemNameKey.String("redis")
+ // [SAP HANA]
+ // Stability: development
+ //
+ // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html
+ DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana")
+ // [SAP MaxDB]
+ // Stability: development
+ //
+ // [SAP MaxDB]: https://maxdb.sap.com/
+ DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb")
+ // [SQLite]
+ // Stability: development
+ //
+ // [SQLite]: https://www.sqlite.org/
+ DBSystemNameSQLite = DBSystemNameKey.String("sqlite")
+ // [Teradata]
+ // Stability: development
+ //
+ // [Teradata]: https://www.teradata.com/
+ DBSystemNameTeradata = DBSystemNameKey.String("teradata")
+ // [Trino]
+ // Stability: development
+ //
+ // [Trino]: https://trino.io/
+ DBSystemNameTrino = DBSystemNameKey.String("trino")
+)
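+
+// Illustrative sketch (not part of the generated conventions): attributes a
+// database client span might carry for a parameterized SQL query, combining the
+// db.* helpers with a db.system.name enum value from above. The query text,
+// names, and parameter value are hypothetical placeholders.
+func exampleDBClientSpanAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemNamePostgreSQL,
+		DBNamespace("customers"),
+		DBCollectionName("public.users"),
+		DBOperationName("SELECT"),
+		DBQueryText("SELECT * FROM public.users WHERE username = ?"),
+		DBQuerySummary("SELECT public.users"),
+		DBQueryParameter("0", "alice"), // emitted as db.query.parameter.0
+	}
+}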
+
+// Namespace: deployment
+const (
+ // DeploymentEnvironmentNameKey is the attribute Key conforming to the
+ // "deployment.environment.name" semantic conventions. It represents the name of
+ // the [deployment environment] (aka deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "staging", "production"
+ // Note: `deployment.environment.name` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id` resource
+ // attributes.
+ // This implies that resources carrying the following attribute combinations
+ // MUST be
+ // considered to be identifying the same service:
+ //
+ // - `service.name=frontend`, `deployment.environment.name=production`
+ // - `service.name=frontend`, `deployment.environment.name=staging`.
+ //
+ //
+ // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+ DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name")
+
+ // DeploymentIDKey is the attribute Key conforming to the "deployment.id"
+ // semantic conventions. It represents the id of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1208"
+ DeploymentIDKey = attribute.Key("deployment.id")
+
+ // DeploymentNameKey is the attribute Key conforming to the "deployment.name"
+ // semantic conventions. It represents the name of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "deploy my app", "deploy-frontend"
+ DeploymentNameKey = attribute.Key("deployment.name")
+
+ // DeploymentStatusKey is the attribute Key conforming to the
+ // "deployment.status" semantic conventions. It represents the status of the
+ // deployment.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ DeploymentStatusKey = attribute.Key("deployment.status")
+)
+
+// DeploymentEnvironmentName returns an attribute KeyValue conforming to the
+// "deployment.environment.name" semantic conventions. It represents the name of
+// the [deployment environment] (aka deployment tier).
+//
+// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+func DeploymentEnvironmentName(val string) attribute.KeyValue {
+ return DeploymentEnvironmentNameKey.String(val)
+}
+
+// DeploymentID returns an attribute KeyValue conforming to the "deployment.id"
+// semantic conventions. It represents the id of the deployment.
+func DeploymentID(val string) attribute.KeyValue {
+ return DeploymentIDKey.String(val)
+}
+
+// DeploymentName returns an attribute KeyValue conforming to the
+// "deployment.name" semantic conventions. It represents the name of the
+// deployment.
+func DeploymentName(val string) attribute.KeyValue {
+ return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+ // failed
+ // Stability: development
+ DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+ // succeeded
+ // Stability: development
+ DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
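+
+// Illustrative sketch (not part of the generated conventions): deployment
+// attributes describing a completed deployment, using the helpers and the
+// deployment.status enum values above. The id and names are hypothetical
+// placeholders.
+func exampleDeploymentAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DeploymentEnvironmentName("production"),
+		DeploymentID("1208"),
+		DeploymentName("deploy-frontend"),
+		DeploymentStatusSucceeded,
+	}
+}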
+
+// Namespace: destination
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the destination
+ // address - domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
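+
+// Illustrative sketch (not part of the generated conventions): the two
+// destination.* helpers above paired for a client span. The address and port
+// are hypothetical placeholders.
+func exampleDestinationAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DestinationAddress("destination.example.com"),
+		DestinationPort(3389),
+	}
+}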
+
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
+ //
+	// > [!WARNING]
+	// > This attribute may contain sensitive (PII) information. Caution
+	// > should be taken when storing personal data or anything which can identify a
+	// > user. GDPR and data protection laws may apply,
+	// > ensure you do your own due diligence.
+	// >
+	// > Due to these reasons, this identifier is not recommended for consumer
+	// > applications and will likely result in rejection from both Google Play and
+	// > App Store. However, it may be appropriate for specific enterprise scenarios,
+	// > such as kiosk devices or enterprise-managed devices, with appropriate
+	// > compliance clearance. Any instrumentation providing this identifier MUST
+	// > implement it as an opt-in feature.
+	// >
+	// > See [`app.installation.id`] for a more privacy-preserving alternative.
+ //
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apple", "Samsung"
+ // Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+ // hardcode the value `Apple`.
+ //
+ // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone3,4", "SM-G920F"
+ // Note: It's recommended this value represents a machine-readable version of
+ // the model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+ // semantic conventions. It represents the marketing name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+ // Note: It's recommended this value represents a human-readable version of the
+ // device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
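+
+// Example usage (illustrative sketch): building device attributes, which are
+// typically recorded once per telemetry source; the privacy caveats for
+// `device.id` above still apply.
+//
+//    attrs := []attribute.KeyValue{
+//        DeviceManufacturer("Samsung"),
+//        DeviceModelIdentifier("SM-G920F"),
+//        DeviceModelName("Samsung Galaxy S6"),
+//    }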
+
+// Namespace: disk
+const (
+ // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+ // semantic conventions. It represents the disk IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+ // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic
+ // conventions. It represents the list of IPv4 or IPv6 addresses resolved during
+ // DNS lookup.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ DNSAnswersKey = attribute.Key("dns.answers")
+
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: The name represents the queried domain name as it appears in the DNS
+ // query without any additional normalization.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers"
+// semantic conventions. It represents the list of IPv4 or IPv6 addresses
+// resolved during DNS lookup.
+func DNSAnswers(val ...string) attribute.KeyValue {
+ return DNSAnswersKey.StringSlice(val)
+}
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
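+
+// Example usage (illustrative sketch): DNSAnswers is variadic, so resolved
+// addresses can be passed directly or expanded from a slice.
+//
+//    ips := []string{"10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}
+//    attrs := []attribute.KeyValue{
+//        DNSQuestionName("www.example.com"),
+//        DNSAnswers(ips...),
+//    }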
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the represents
+ // the human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the represents
+// the human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or RPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ //
+ // Stability: stable
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
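+
+// Example usage (illustrative sketch): recording `error.type`, falling back to
+// ErrorTypeOther when no predictable, low-cardinality identifier is available;
+// `span` and `err` are assumed to come from the caller.
+//
+//    if err != nil {
+//        span.SetAttributes(ErrorTypeOther)
+//        // or, when a well-known identifier exists:
+//        // span.SetAttributes(ErrorTypeKey.String("timeout"))
+//    }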
+
+// Namespace: exception
+const (
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "Division by zero", "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the exception
+ // should be preferred over the static type in languages that support it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "java.net.ConnectException", "OSError"
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the "exception.type"
+// semantic conventions. It represents the type of the exception (its
+// fully-qualified class name, if applicable). The dynamic type of the exception
+// should be preferred over the static type in languages that support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
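+
+// Example usage (illustrative sketch): attaching exception attributes to a span
+// event, assuming `span` is a trace.Span and `trace` is
+// go.opentelemetry.io/otel/trace.
+//
+//    span.AddEvent("exception", trace.WithAttributes(
+//        ExceptionType("OSError"),
+//        ExceptionMessage("Division by zero"),
+//    ))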
+
+// Namespace: faas
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the serverless
+ // function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron Expression].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0/5 * * * ? *
+ //
+ // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name of
+ // the source on which the triggering operation was performed. For example, in
+ // Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
+ // the database name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myBucketName", "myDbName"
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3
+ // this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myFile.txt", "myTableName"
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string containing
+ // the time when the data was accessed in the [ISO 8601] format expressed in
+ // [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a string,
+ // that will be potentially reused for other invocations to the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+ // Note: - **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation ID of
+ // the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+ // semantic conventions. It represents the name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: my-function
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud region of
+ // the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: eu-central-1
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+ // semantic conventions. It represents the amount of memory available to the
+ // serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information (which must be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this runtime
+ // instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-function", "myazurefunctionapp/some-function-name"
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function.name`]
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ //
+ // - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ //
+ //
+ // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation time
+ // in the [ISO 8601] format expressed in [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic
+ // conventions. It represents the type of the trigger which caused this function
+ // invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic
+ // conventions. It represents the immutable version of the function being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "26", "pinkfroid-00002"
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // - **AWS Lambda:** The [function version]
+ // (an integer represented as a decimal string).
+ // - **Google Cloud Run (Services):** The [revision]
+ // (i.e., the function name plus the revision suffix).
+ // - **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment variable].
+ // - **Azure Functions:** Not applicable. Do not set this attribute.
+ //
+ //
+ // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html
+ // [revision]: https://cloud.google.com/run/docs/managing/revisions
+ // [`K_REVISION` environment variable]: https://cloud.google.com/run/docs/container-contract#services-env-vars
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart"
+// semantic conventions. It represents a boolean that is true if the serverless
+// function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic
+// conventions. It represents a string containing the schedule period as
+// [Cron Expression].
+//
+// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of the
+// source on which the triggering operation was performed. For example, in Cloud
+// Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO 8601] format expressed in
+// [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance"
+// semantic conventions. It represents the execution environment ID as a string,
+// that will be potentially reused for other invocations to the same
+// function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+// the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region of
+// the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic
+// conventions. It represents the name of the single function that this runtime
+// instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic
+// conventions. It represents a string containing the function invocation time in
+// the [ISO 8601] format expressed in [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the "faas.version"
+// semantic conventions. It represents the immutable version of the function
+// being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Enum values for faas.document.operation
+var (
+ // When a new object is created.
+ // Stability: development
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified.
+ // Stability: development
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted.
+ // Stability: development
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ // Stability: development
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+ // A response to some data source operation such as a database or filesystem
+ // read/write
+ // Stability: development
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ // Stability: development
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ // Stability: development
+ FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ // Stability: development
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ // Stability: development
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
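+
+// Example usage (illustrative sketch): combining enum values and constructor
+// functions for an HTTP-triggered invocation that was a cold start, assuming
+// `span` is a trace.Span.
+//
+//    span.SetAttributes(
+//        FaaSTriggerHTTP,
+//        FaaSColdstart(true),
+//        FaaSInvokedName("my-function"),
+//    )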
+
+// Namespace: feature_flag
+const (
+ // FeatureFlagContextIDKey is the attribute Key conforming to the
+ // "feature_flag.context.id" semantic conventions. It represents the unique
+ // identifier for the flag evaluation context. For example, the targeting key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+ FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+ // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+ // semantic conventions. It represents the lookup key of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "logo-color"
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider.name" semantic conventions. It represents the
+ // identifies the feature flag provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "Flag Manager"
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+ // FeatureFlagResultReasonKey is the attribute Key conforming to the
+ // "feature_flag.result.reason" semantic conventions. It represents the reason
+ // code which shows how a feature flag value was determined.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "static", "targeting_match", "error", "default"
+ FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+ // FeatureFlagResultValueKey is the attribute Key conforming to the
+ // "feature_flag.result.value" semantic conventions. It represents the evaluated
+ // value of the feature flag.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "#ff0000", true, 3
+ // Note: With some feature flag providers, feature flag results can be quite
+ // large or contain private or sensitive details.
+ // Because of this, `feature_flag.result.variant` is often the preferred
+ // attribute if it is available.
+ //
+ // It may be desirable to redact or otherwise limit the size and scope of
+ // `feature_flag.result.value` if possible.
+ // Because the evaluated flag value is unstructured and may be any type, it is
+ // left to the instrumentation author to determine how best to achieve this.
+ FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+ // FeatureFlagResultVariantKey is the attribute Key conforming to the
+ // "feature_flag.result.variant" semantic conventions. It represents a semantic
+ // identifier for an evaluated flag value.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "red", "true", "on"
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+ // FeatureFlagSetIDKey is the attribute Key conforming to the
+ // "feature_flag.set.id" semantic conventions. It represents the identifier of
+ // the [flag set] to which the feature flag belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "proj-1", "ab98sgs", "service1/dev"
+ //
+ // [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+ FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+ // FeatureFlagVersionKey is the attribute Key conforming to the
+ // "feature_flag.version" semantic conventions. It represents the version of the
+ // ruleset used during the evaluation. This may be any stable value which
+ // uniquely identifies the ruleset.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "1", "01ABCDEF"
+ FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+ return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It represents the
+// identifies the feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+ return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+ return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+ return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+ // The resolved value is static (no dynamic evaluation).
+ // Stability: release_candidate
+ FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+ // The resolved value fell back to a pre-configured value (no dynamic evaluation
+ // occurred or dynamic evaluation yielded no result).
+ // Stability: release_candidate
+ FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+ // The resolved value was the result of a dynamic evaluation, such as a rule or
+ // specific user-targeting.
+ // Stability: release_candidate
+ FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+ // The resolved value was the result of pseudorandom assignment.
+ // Stability: release_candidate
+ FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+ // The resolved value was retrieved from cache.
+ // Stability: release_candidate
+ FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+ // The resolved value was the result of the flag being disabled in the
+ // management system.
+ // Stability: release_candidate
+ FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+ // The reason for the resolved value could not be determined.
+ // Stability: release_candidate
+ FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+ // The resolved value is non-authoritative or possibly out of date
+ // Stability: release_candidate
+ FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+ // The resolved value was the result of an error.
+ // Stability: release_candidate
+ FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
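+
+// Example usage (illustrative sketch): attributes describing a single flag
+// evaluation, preferring the variant over the raw value as noted above.
+//
+//    attrs := []attribute.KeyValue{
+//        FeatureFlagKey("logo-color"),
+//        FeatureFlagProviderName("Flag Manager"),
+//        FeatureFlagResultVariant("red"),
+//        FeatureFlagResultReasonTargetingMatch,
+//    }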
+
+// Namespace: file
+const (
+ // FileAccessedKey is the attribute Key conforming to the "file.accessed"
+ // semantic conventions. It represents the time when the file was last accessed,
+ // in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileAccessedKey = attribute.Key("file.accessed")
+
+ // FileAttributesKey is the attribute Key conforming to the "file.attributes"
+ // semantic conventions. It represents the array of file attributes.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "readonly", "hidden"
+ // Note: Attribute names depend on the OS or file system. Here’s a
+ // non-exhaustive list of values expected for this attribute: `archive`,
+ // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+ // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+ // `write`.
+ FileAttributesKey = attribute.Key("file.attributes")
+
+ // FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+ // conventions. It represents the time when the file attributes or metadata was
+ // last changed, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: `file.changed` captures the time when any of the file's properties or
+ // attributes (including the content) are changed, while `file.modified`
+ // captures the timestamp when the file content is modified.
+ FileChangedKey = attribute.Key("file.changed")
+
+ // FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+ // conventions. It represents the time when the file was created, in ISO 8601
+ // format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileCreatedKey = attribute.Key("file.created")
+
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is located.
+ // It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/user", "C:\Program Files\MyApp"
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the leading
+ // dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+ // semantic conventions. It represents the name of the fork. A fork is
+ // additional data associated with a filesystem object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Zone.Identifier"
+ // Note: On Linux, a resource fork is used to store additional data with a
+ // filesystem object. A file always has at least one fork for the data portion,
+ // and additional forks may exist.
+ // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+ // data stream for a file is just called $DATA. Zone.Identifier is commonly used
+ // by Windows to track contents downloaded from the Internet. An ADS is
+ // typically of the form: C:\path\to\filename.extension:some_fork_name, and
+ // some_fork_name is the value that should populate `fork_name`.
+ // `filename.extension` should populate `file.name`, and `extension` should
+ // populate `file.extension`. The full path, `file.path`, will include the fork
+ // name.
+ FileForkNameKey = attribute.Key("file.fork_name")
+
+ // FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+ // semantic conventions. It represents the primary Group ID (GID) of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileGroupIDKey = attribute.Key("file.group.id")
+
+ // FileGroupNameKey is the attribute Key conforming to the "file.group.name"
+ // semantic conventions. It represents the primary group name of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "users"
+ FileGroupNameKey = attribute.Key("file.group.name")
+
+ // FileInodeKey is the attribute Key conforming to the "file.inode" semantic
+ // conventions. It represents the inode representing the file in the filesystem.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "256383"
+ FileInodeKey = attribute.Key("file.inode")
+
+ // FileModeKey is the attribute Key conforming to the "file.mode" semantic
+ // conventions. It represents the mode of the file in octal representation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0640"
+ FileModeKey = attribute.Key("file.mode")
+
+ // FileModifiedKey is the attribute Key conforming to the "file.modified"
+ // semantic conventions. It represents the time when the file content was last
+ // modified, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ FileModifiedKey = attribute.Key("file.modified")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.png"
+ FileNameKey = attribute.Key("file.name")
+
+ // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id"
+ // semantic conventions. It represents the user ID (UID) or security identifier
+ // (SID) of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileOwnerIDKey = attribute.Key("file.owner.id")
+
+ // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name"
+ // semantic conventions. It represents the username of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ FileOwnerNameKey = attribute.Key("file.owner.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe"
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FileSizeKey = attribute.Key("file.size")
+
+ // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the
+ // "file.symbolic_link.target_path" semantic conventions. It represents the path
+ // to the target of a symbolic link.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/python3"
+ // Note: This attribute is only applicable to symbolic links.
+ FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path")
+)
+
+// FileAccessed returns an attribute KeyValue conforming to the "file.accessed"
+// semantic conventions. It represents the time when the file was last accessed,
+// in ISO 8601 format.
+func FileAccessed(val string) attribute.KeyValue {
+ return FileAccessedKey.String(val)
+}
+
+// FileAttributes returns an attribute KeyValue conforming to the
+// "file.attributes" semantic conventions. It represents the array of file
+// attributes.
+func FileAttributes(val ...string) attribute.KeyValue {
+ return FileAttributesKey.StringSlice(val)
+}
+
+// FileChanged returns an attribute KeyValue conforming to the "file.changed"
+// semantic conventions. It represents the time when the file attributes or
+// metadata was last changed, in ISO 8601 format.
+func FileChanged(val string) attribute.KeyValue {
+ return FileChangedKey.String(val)
+}
+
+// FileCreated returns an attribute KeyValue conforming to the "file.created"
+// semantic conventions. It represents the time when the file was created, in ISO
+// 8601 format.
+func FileCreated(val string) attribute.KeyValue {
+ return FileCreatedKey.String(val)
+}
+
+// FileDirectory returns an attribute KeyValue conforming to the "file.directory"
+// semantic conventions. It represents the directory where the file is located.
+// It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the "file.extension"
+// semantic conventions. It represents the file extension, excluding the leading
+// dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileForkName returns an attribute KeyValue conforming to the "file.fork_name"
+// semantic conventions. It represents the name of the fork. A fork is additional
+// data associated with a filesystem object.
+func FileForkName(val string) attribute.KeyValue {
+ return FileForkNameKey.String(val)
+}
+
+// FileGroupID returns an attribute KeyValue conforming to the "file.group.id"
+// semantic conventions. It represents the primary Group ID (GID) of the file.
+func FileGroupID(val string) attribute.KeyValue {
+ return FileGroupIDKey.String(val)
+}
+
+// FileGroupName returns an attribute KeyValue conforming to the
+// "file.group.name" semantic conventions. It represents the primary group name
+// of the file.
+func FileGroupName(val string) attribute.KeyValue {
+ return FileGroupNameKey.String(val)
+}
+
+// FileInode returns an attribute KeyValue conforming to the "file.inode"
+// semantic conventions. It represents the inode representing the file in the
+// filesystem.
+func FileInode(val string) attribute.KeyValue {
+ return FileInodeKey.String(val)
+}
+
+// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic
+// conventions. It represents the mode of the file in octal representation.
+func FileMode(val string) attribute.KeyValue {
+ return FileModeKey.String(val)
+}
+
+// FileModified returns an attribute KeyValue conforming to the "file.modified"
+// semantic conventions. It represents the time when the file content was last
+// modified, in ISO 8601 format.
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
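+
+// Example usage (illustrative sketch): attributes describing an observed file;
+// FileAttributes is variadic.
+//
+//    attrs := []attribute.KeyValue{
+//        FilePath("/home/alice/example.png"),
+//        FileName("example.png"),
+//        FileExtension("png"),
+//        FileAttributes("readonly", "hidden"),
+//    }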
+
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+ // criticality of a service indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+ // environment of a service is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+ // the criticality of a workload indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+ // the environment of a workload is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPAppHubDestinationApplicationContainerKey is the attribute Key conforming
+ // to the "gcp.apphub_destination.application.container" semantic conventions.
+ // It represents the container within GCP where the AppHub destination
+ // application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubDestinationApplicationContainerKey = attribute.Key("gcp.apphub_destination.application.container")
+
+ // GCPAppHubDestinationApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub_destination.application.id" semantic conventions. It represents
+ // the name of the destination application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubDestinationApplicationIDKey = attribute.Key("gcp.apphub_destination.application.id")
+
+ // GCPAppHubDestinationApplicationLocationKey is the attribute Key conforming to
+ // the "gcp.apphub_destination.application.location" semantic conventions. It
+ // represents the GCP zone or region where the destination application is
+ // defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubDestinationApplicationLocationKey = attribute.Key("gcp.apphub_destination.application.location")
+
+ // GCPAppHubDestinationServiceCriticalityTypeKey is the attribute Key conforming
+ // to the "gcp.apphub_destination.service.criticality_type" semantic
+ // conventions. It represents the criticality of a destination service, which
+ // indicates its importance to the business, as specified in [AppHub type enum].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubDestinationServiceCriticalityTypeKey = attribute.Key("gcp.apphub_destination.service.criticality_type")
+
+ // GCPAppHubDestinationServiceEnvironmentTypeKey is the attribute Key conforming
+ // to the "gcp.apphub_destination.service.environment_type" semantic
+ // conventions. It represents the software lifecycle stage of a destination
+ // service as defined in [AppHub environment type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubDestinationServiceEnvironmentTypeKey = attribute.Key("gcp.apphub_destination.service.environment_type")
+
+ // GCPAppHubDestinationServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub_destination.service.id" semantic conventions. It represents the
+ // name of the destination service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubDestinationServiceIDKey = attribute.Key("gcp.apphub_destination.service.id")
+
+ // GCPAppHubDestinationWorkloadCriticalityTypeKey is the attribute Key
+ // conforming to the "gcp.apphub_destination.workload.criticality_type" semantic
+ // conventions. It represents the criticality of a destination workload, which
+ // indicates its importance to the business, as specified in [AppHub type enum].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubDestinationWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub_destination.workload.criticality_type")
+
+ // GCPAppHubDestinationWorkloadEnvironmentTypeKey is the attribute Key
+ // conforming to the "gcp.apphub_destination.workload.environment_type" semantic
+ // conventions. It represents the environment of the destination workload, i.e.
+ // the stage of its software lifecycle, as provided in the
+ // [AppHub environment type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubDestinationWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub_destination.workload.environment_type")
+
+ // GCPAppHubDestinationWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub_destination.workload.id" semantic conventions. It represents the
+ // name of the destination workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubDestinationWorkloadIDKey = attribute.Key("gcp.apphub_destination.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It represents the identifies the
+ // Google Cloud service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+ // of a GCE instance. This is the full value of the default or
+ // [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPAppHubDestinationApplicationContainer returns an attribute KeyValue
+// conforming to the "gcp.apphub_destination.application.container" semantic
+// conventions. It represents the container within GCP where the AppHub
+// destination application is defined.
+func GCPAppHubDestinationApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubDestinationApplicationContainerKey.String(val)
+}
+
+// GCPAppHubDestinationApplicationID returns an attribute KeyValue conforming to
+// the "gcp.apphub_destination.application.id" semantic conventions. It
+// represents the name of the destination application as configured in AppHub.
+func GCPAppHubDestinationApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubDestinationApplicationIDKey.String(val)
+}
+
+// GCPAppHubDestinationApplicationLocation returns an attribute KeyValue
+// conforming to the "gcp.apphub_destination.application.location" semantic
+// conventions. It represents the GCP zone or region where the destination
+// application is defined.
+func GCPAppHubDestinationApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubDestinationApplicationLocationKey.String(val)
+}
+
+// GCPAppHubDestinationServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub_destination.service.id" semantic conventions. It represents the
+// name of the destination service as configured in AppHub.
+func GCPAppHubDestinationServiceID(val string) attribute.KeyValue {
+ return GCPAppHubDestinationServiceIDKey.String(val)
+}
+
+// GCPAppHubDestinationWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub_destination.workload.id" semantic conventions. It represents the
+// name of the destination workload as configured in AppHub.
+func GCPAppHubDestinationWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubDestinationWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It represents the identifies the
+// Google Cloud service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
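+// Illustrative usage sketch, not part of the generated conventions: the note on
+// GCPClientServiceKey says the value should be derived from the canonical
+// service domain, e.g. "spanner.googleapis.com" -> "spanner". The hypothetical
+// helper below shows that derivation using only this file's existing imports.
+func exampleGCPClientServiceAttr(canonicalDomain string) attribute.KeyValue {
+ // Take everything before the first '.' of the canonical domain.
+ service := canonicalDomain
+ for i := 0; i < len(canonicalDomain); i++ {
+  if canonicalDomain[i] == '.' {
+   service = canonicalDomain[:i]
+   break
+  }
+ }
+ return GCPClientService(service)
+}
+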
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
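+// Illustrative usage sketch, not part of the generated conventions: per the key
+// documentation above, the Cloud Run Job attributes come from the
+// CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment variables. Reading
+// the environment would require extra imports (os, strconv), so this
+// hypothetical helper takes the already-read values as parameters.
+func exampleCloudRunJobAttrs(execution string, taskIndex int) []attribute.KeyValue {
+ // e.g. execution = "sample-job-mdw84", taskIndex = 0.
+ return []attribute.KeyValue{
+  GCPCloudRunJobExecution(execution),
+  GCPCloudRunJobTaskIndex(taskIndex),
+ }
+}
+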
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
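+// Illustrative usage sketch, not part of the generated conventions: a resource
+// or span describing an AppHub-registered service could combine the string
+// constructors above with the enum values. All literal values below are
+// hypothetical placeholders taken from the documented examples.
+func exampleAppHubServiceAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GCPAppHubApplicationContainer("projects/my-container-project"),
+  GCPAppHubApplicationID("my-application"),
+  GCPAppHubApplicationLocation("us-central1"),
+  GCPAppHubServiceID("my-service"),
+  GCPAppHubServiceCriticalityTypeMissionCritical,
+  GCPAppHubServiceEnvironmentTypeProduction,
+ }
+}
+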
+// Enum values for gcp.apphub_destination.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeMissionCritical = GCPAppHubDestinationServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeHigh = GCPAppHubDestinationServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeMedium = GCPAppHubDestinationServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubDestinationServiceCriticalityTypeLow = GCPAppHubDestinationServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub_destination.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeProduction = GCPAppHubDestinationServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeStaging = GCPAppHubDestinationServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeTest = GCPAppHubDestinationServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubDestinationServiceEnvironmentTypeDevelopment = GCPAppHubDestinationServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub_destination.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeMissionCritical = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeHigh = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeMedium = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubDestinationWorkloadCriticalityTypeLow = GCPAppHubDestinationWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub_destination.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeProduction = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeStaging = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeTest = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubDestinationWorkloadEnvironmentTypeDevelopment = GCPAppHubDestinationWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Namespace: gen_ai
+const (
+ // GenAIAgentDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.agent.description" semantic conventions. It represents the free-form
+ // description of the GenAI agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Helps with math problems", "Generates fiction stories"
+ GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description")
+
+ // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id"
+ // semantic conventions. It represents the unique identifier of the GenAI agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIAgentIDKey = attribute.Key("gen_ai.agent.id")
+
+ // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name"
+ // semantic conventions. It represents the human-readable name of the GenAI
+ // agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIEmbeddingsDimensionCountKey is the attribute Key conforming to the
+ // "gen_ai.embeddings.dimension.count" semantic conventions. It represents the
+ // number of dimensions the resulting output embeddings should have.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 512, 1024
+ GenAIEmbeddingsDimensionCountKey = attribute.Key("gen_ai.embeddings.dimension.count")
+
+ // GenAIEvaluationExplanationKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.explanation" semantic conventions. It represents a
+ // free-form explanation for the assigned score provided by the evaluator.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "The response is factually accurate but lacks sufficient detail to
+ // fully address the question."
+ GenAIEvaluationExplanationKey = attribute.Key("gen_ai.evaluation.explanation")
+
+ // GenAIEvaluationNameKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.name" semantic conventions. It represents the name of the
+ // evaluation metric used for the GenAI response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Relevance", "IntentResolution"
+ GenAIEvaluationNameKey = attribute.Key("gen_ai.evaluation.name")
+
+ // GenAIEvaluationScoreLabelKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.score.label" semantic conventions. It represents the human
+ // readable label for evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "relevant", "not_relevant", "correct", "incorrect", "pass", "fail"
+ // Note: This attribute provides a human-readable interpretation of the
+ // evaluation score produced by an evaluator. For example, a score value of 1
+ // could mean "relevant" in one evaluation system and "not relevant" in another,
+ // depending on the scoring range and evaluator. The label SHOULD have low
+ // cardinality. Possible values depend on the evaluation metric and evaluator
+ // used; implementations SHOULD document the possible values.
+ GenAIEvaluationScoreLabelKey = attribute.Key("gen_ai.evaluation.score.label")
+
+ // GenAIEvaluationScoreValueKey is the attribute Key conforming to the
+ // "gen_ai.evaluation.score.value" semantic conventions. It represents the
+ // evaluation score returned by the evaluator.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4.0
+ GenAIEvaluationScoreValueKey = attribute.Key("gen_ai.evaluation.score.value")
+
+ // GenAIInputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.input.messages" semantic conventions. It represents the chat history
+ // provided to the model as an input.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n
+ // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n
+ // "parts": [\n {\n "type": "tool_call",\n "id":
+ // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n
+ // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n
+ // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n
+ // "result": "rainy, 57°F"\n }\n ]\n }\n]\n"
+ // Note: Instrumentations MUST follow [Input messages JSON schema].
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Messages MUST be provided in the order they were sent to the model.
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // input messages.
+ //
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages")
+
+ // GenAIOperationNameKey is the attribute Key conforming to the
+ // "gen_ai.operation.name" semantic conventions. It represents the name of the
+ // operation being performed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If one of the predefined values applies, but the specific system uses a
+ // different name, it's RECOMMENDED to document it in the semantic conventions
+ // for the specific GenAI system and to use the system-specific name in the
+ // instrumentation. If a different name is not documented, instrumentation
+ // libraries SHOULD use the applicable predefined value.
+ GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+ // GenAIOutputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.output.messages" semantic conventions. It represents the messages
+ // returned by the model where each message represents a specific model response
+ // (choice, candidate).
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n
+ // "content": "The weather in Paris is currently rainy with a temperature of
+ // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n"
+ // Note: Instrumentations MUST follow [Output messages JSON schema]
+ //
+ // Each message represents a single output choice/candidate generated by
+ // the model. Each message corresponds to exactly one generation
+ // (choice/candidate) and vice versa - one choice cannot be split across
+ // multiple messages or one message cannot contain parts from multiple choices.
+ //
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // output messages.
+ //
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages")
+
+ // GenAIOutputTypeKey is the attribute Key conforming to the
+ // "gen_ai.output.type" semantic conventions. It represents the represents the
+ // content type requested by the client.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute SHOULD be used when the client requests output of a
+ // specific type. The model may return zero or more outputs of this type.
+ // This attribute specifies the output modality and not the actual output
+ // format. For example, if an image is requested, the actual output could be a
+ // URL pointing to an image file.
+ // Additional output format details may be recorded in the future in the
+ // `gen_ai.output.{type}.*` attributes.
+ GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+ // GenAIPromptNameKey is the attribute Key conforming to the
+ // "gen_ai.prompt.name" semantic conventions. It represents the name of the
+ // prompt that uniquely identifies it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "analyze-code"
+ GenAIPromptNameKey = attribute.Key("gen_ai.prompt.name")
+
+ // GenAIProviderNameKey is the attribute Key conforming to the
+ // "gen_ai.provider.name" semantic conventions. It represents the Generative AI
+ // provider as identified by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The attribute SHOULD be set based on the instrumentation's best
+ // knowledge and may differ from the actual model provider.
+ //
+ // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms,
+ // are accessible using the OpenAI REST API and corresponding client libraries,
+ // but may proxy or host models from different providers.
+ //
+ // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address`
+ // attributes may help identify the actual system in use.
+ //
+ // The `gen_ai.provider.name` attribute acts as a discriminator that
+ // identifies the GenAI telemetry format flavor specific to that provider
+ // within GenAI semantic conventions.
+ // It SHOULD be set consistently with provider-specific attributes and signals.
+ // For example, GenAI spans, metrics, and events related to AWS Bedrock
+ // should have the `gen_ai.provider.name` set to `aws.bedrock` and include
+ // applicable `aws.bedrock.*` attributes and are not expected to include
+ // `openai.*` attributes.
+ GenAIProviderNameKey = attribute.Key("gen_ai.provider.name")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "base64"], ["float", "binary"
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4"
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the requests with
+ // same seed value more likely to return same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "stop"], ["stop", "length"
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemInstructionsKey is the attribute Key conforming to the
+ // "gen_ai.system_instructions" semantic conventions. It represents the system
+ // message or instructions provided to the GenAI model separately from the chat
+ // history.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet
+ // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type":
+ // "text",\n "content": "You are a language translator."\n },\n {\n "type":
+ // "text",\n "content": "Your mission is to translate text in English to
+ // French."\n }\n]\n"
+ // Note: This attribute SHOULD be used when the corresponding provider or API
+ // allows system instructions or messages to be provided separately from the
+ // chat history.
+ //
+ // Instructions that are part of the chat history SHOULD be recorded in
+ // `gen_ai.input.messages` attribute instead.
+ //
+ // Instrumentations MUST follow [System instructions JSON schema].
+ //
+ // When recorded on spans, it MAY be recorded as a JSON string if structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // system instructions.
+ //
+ // > [!Warning]
+ // > This attribute may contain sensitive information.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallArgumentsKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.arguments" semantic conventions. It represents the
+ // parameters passed to the tool call.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{\n "location": "San Francisco?",\n "date": "2025-10-01"\n}\n"
+ // Note: > [!WARNING]
+ //
+ // > This attribute may contain sensitive information.
+ //
+ // It's expected to be an object. If only a serialized string is available
+ // to the instrumentation, the instrumentation SHOULD make a best effort to
+ // deserialize it to an object. When recorded on spans, it MAY be recorded as a
+ // JSON string if structured format is not supported and SHOULD be recorded in
+ // structured form otherwise.
+ GenAIToolCallArgumentsKey = attribute.Key("gen_ai.tool.call.arguments")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolCallResultKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.result" semantic conventions. It represents the result
+ // returned by the tool call (if any and if execution was successful).
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{\n "temperature_range": {\n "high": 75,\n "low": 60\n },\n
+ // "conditions": "sunny"\n}\n"
+ // Note: > [!WARNING]
+ //
+ // > This attribute may contain sensitive information.
+ //
+ // It's expected to be an object. If only a serialized string is available
+ // to the instrumentation, the instrumentation SHOULD make a best effort to
+ // deserialize it to an object. When recorded on spans, it MAY be recorded as a
+ // JSON string if structured format is not supported and SHOULD be recorded in
+ // structured form otherwise.
+ GenAIToolCallResultKey = attribute.Key("gen_ai.tool.call.result")
+
+ // GenAIToolDefinitionsKey is the attribute Key conforming to the
+ // "gen_ai.tool.definitions" semantic conventions. It represents the list of
+ // source system tool definitions available to the GenAI agent or model.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "type": "function",\n "name": "get_current_weather",\n
+ // "description": "Get the current weather in a given location",\n "parameters":
+ // {\n "type": "object",\n "properties": {\n "location": {\n "type": "string",\n
+ // "description": "The city and state, e.g. San Francisco, CA"\n },\n "unit":
+ // {\n "type": "string",\n "enum": [\n "celsius",\n "fahrenheit"\n ]\n }\n },\n
+ // "required": [\n "location",\n "unit"\n ]\n }\n }\n]\n"
+ // Note: The value of this attribute matches source system tool definition
+ // format.
+ //
+ // It's expected to be an array of objects where each object represents a tool
+ // definition. If only a serialized string is available to the instrumentation,
+ // the instrumentation SHOULD make a best effort to deserialize it to an array.
+ // When recorded on spans, it MAY be recorded as a
+ // JSON string if structured format is not supported and SHOULD be recorded in
+ // structured form otherwise.
+ //
+ // Since this attribute could be large, it's NOT RECOMMENDED to populate
+ // it by default. Instrumentations MAY provide a way to enable
+ // populating this attribute.
+ GenAIToolDefinitionsKey = attribute.Key("gen_ai.tool.definitions")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+ // tokens used in the GenAI input (prompt).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens")
+
+ // GenAIUsageOutputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.output_tokens" semantic conventions. It represents the number
+ // of tokens used in the GenAI response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 180
+ GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens")
+)
+
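+// Illustrative usage sketch, not part of the generated conventions: per the
+// note on GenAIInputMessagesKey, span instrumentations MAY fall back to a JSON
+// string when structured recording is not supported. The pre-serialized
+// document below is a hypothetical example following the input-messages schema.
+func exampleGenAIInputMessagesAttr() attribute.KeyValue {
+ const messagesJSON = `[{"role":"user","parts":[{"type":"text","content":"Weather in Paris?"}]}]`
+ return GenAIInputMessagesKey.String(messagesJSON)
+}
+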
+// GenAIAgentDescription returns an attribute KeyValue conforming to the
+// "gen_ai.agent.description" semantic conventions. It represents the free-form
+// description of the GenAI agent provided by the application.
+func GenAIAgentDescription(val string) attribute.KeyValue {
+ return GenAIAgentDescriptionKey.String(val)
+}
+
+// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id"
+// semantic conventions. It represents the unique identifier of the GenAI agent.
+func GenAIAgentID(val string) attribute.KeyValue {
+ return GenAIAgentIDKey.String(val)
+}
+
+// GenAIAgentName returns an attribute KeyValue conforming to the
+// "gen_ai.agent.name" semantic conventions. It represents the human-readable
+// name of the GenAI agent provided by the application.
+func GenAIAgentName(val string) attribute.KeyValue {
+ return GenAIAgentNameKey.String(val)
+}
+
+// GenAIConversationID returns an attribute KeyValue conforming to the
+// "gen_ai.conversation.id" semantic conventions. It represents the unique
+// identifier for a conversation (session, thread), used to store and correlate
+// messages within this conversation.
+func GenAIConversationID(val string) attribute.KeyValue {
+ return GenAIConversationIDKey.String(val)
+}
+
+// GenAIDataSourceID returns an attribute KeyValue conforming to the
+// "gen_ai.data_source.id" semantic conventions. It represents the data source
+// identifier.
+func GenAIDataSourceID(val string) attribute.KeyValue {
+ return GenAIDataSourceIDKey.String(val)
+}
+
+// GenAIEmbeddingsDimensionCount returns an attribute KeyValue conforming to the
+// "gen_ai.embeddings.dimension.count" semantic conventions. It represents the
+// number of dimensions the resulting output embeddings should have.
+func GenAIEmbeddingsDimensionCount(val int) attribute.KeyValue {
+ return GenAIEmbeddingsDimensionCountKey.Int(val)
+}
+
+// GenAIEvaluationExplanation returns an attribute KeyValue conforming to the
+// "gen_ai.evaluation.explanation" semantic conventions. It represents a
+// free-form explanation for the assigned score provided by the evaluator.
+func GenAIEvaluationExplanation(val string) attribute.KeyValue {
+ return GenAIEvaluationExplanationKey.String(val)
+}
+
+// GenAIEvaluationName returns an attribute KeyValue conforming to the
+// "gen_ai.evaluation.name" semantic conventions. It represents the name of the
+// evaluation metric used for the GenAI response.
+func GenAIEvaluationName(val string) attribute.KeyValue {
+ return GenAIEvaluationNameKey.String(val)
+}
+
+// GenAIEvaluationScoreLabel returns an attribute KeyValue conforming to the
+// "gen_ai.evaluation.score.label" semantic conventions. It represents the human
+// readable label for evaluation.
+func GenAIEvaluationScoreLabel(val string) attribute.KeyValue {
+ return GenAIEvaluationScoreLabelKey.String(val)
+}
+
+// GenAIEvaluationScoreValue returns an attribute KeyValue conforming to the
+// "gen_ai.evaluation.score.value" semantic conventions. It represents the
+// evaluation score returned by the evaluator.
+func GenAIEvaluationScoreValue(val float64) attribute.KeyValue {
+ return GenAIEvaluationScoreValueKey.Float64(val)
+}
+
+// GenAIPromptName returns an attribute KeyValue conforming to the
+// "gen_ai.prompt.name" semantic conventions. It represents the name of the
+// prompt that uniquely identifies it.
+func GenAIPromptName(val string) attribute.KeyValue {
+ return GenAIPromptNameKey.String(val)
+}
+
+// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the
+// "gen_ai.request.choice.count" semantic conventions. It represents the target
+// number of candidate completions to return.
+func GenAIRequestChoiceCount(val int) attribute.KeyValue {
+ return GenAIRequestChoiceCountKey.Int(val)
+}
+
+// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the
+// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+// encoding formats requested in an embeddings operation, if specified.
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the requests with
+// same seed value more likely to return same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
+func GenAIResponseID(val string) attribute.KeyValue {
+ return GenAIResponseIDKey.String(val)
+}
+
+// GenAIResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// model that generated the response.
+func GenAIResponseModel(val string) attribute.KeyValue {
+ return GenAIResponseModelKey.String(val)
+}
+
+// GenAIToolCallID returns an attribute KeyValue conforming to the
+// "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+// identifier.
+func GenAIToolCallID(val string) attribute.KeyValue {
+ return GenAIToolCallIDKey.String(val)
+}
+
+// GenAIToolDescription returns an attribute KeyValue conforming to the
+// "gen_ai.tool.description" semantic conventions. It represents the tool
+// description.
+func GenAIToolDescription(val string) attribute.KeyValue {
+ return GenAIToolDescriptionKey.String(val)
+}
+
+// GenAIToolName returns an attribute KeyValue conforming to the
+// "gen_ai.tool.name" semantic conventions. It represents the name of the tool
+// utilized by the agent.
+func GenAIToolName(val string) attribute.KeyValue {
+ return GenAIToolNameKey.String(val)
+}
+
+// GenAIToolType returns an attribute KeyValue conforming to the
+// "gen_ai.tool.type" semantic conventions. It represents the type of the tool
+// utilized by the agent.
+func GenAIToolType(val string) attribute.KeyValue {
+ return GenAIToolTypeKey.String(val)
+}
+
+// GenAIUsageInputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI input (prompt).
+func GenAIUsageInputTokens(val int) attribute.KeyValue {
+ return GenAIUsageInputTokensKey.Int(val)
+}
+
+// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI response (completion).
+func GenAIUsageOutputTokens(val int) attribute.KeyValue {
+ return GenAIUsageOutputTokensKey.Int(val)
+}
+
+// Enum values for gen_ai.operation.name
+var (
+ // Chat completion operation such as [OpenAI Chat API]
+ // Stability: development
+ //
+ // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat
+ GenAIOperationNameChat = GenAIOperationNameKey.String("chat")
+ // Multimodal content generation operation such as [Gemini Generate Content]
+ // Stability: development
+ //
+ // [Gemini Generate Content]: https://ai.google.dev/api/generate-content
+ GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content")
+ // Text completions operation such as [OpenAI Completions API (Legacy)]
+ // Stability: development
+ //
+ // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions
+ GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion")
+ // Embeddings operation such as [OpenAI Create embeddings API]
+ // Stability: development
+ //
+ // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create
+ GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings")
+ // Create GenAI agent
+ // Stability: development
+ GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent")
+ // Invoke GenAI agent
+ // Stability: development
+ GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent")
+ // Execute a tool
+ // Stability: development
+ GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool")
+)
+
+// Enum values for gen_ai.output.type
+var (
+ // Plain text
+ // Stability: development
+ GenAIOutputTypeText = GenAIOutputTypeKey.String("text")
+ // JSON object with known or unknown schema
+ // Stability: development
+ GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json")
+ // Image
+ // Stability: development
+ GenAIOutputTypeImage = GenAIOutputTypeKey.String("image")
+ // Speech
+ // Stability: development
+ GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech")
+)
+
+// Enum values for gen_ai.provider.name
+var (
+ // [OpenAI]
+ // Stability: development
+ //
+ // [OpenAI]: https://openai.com/
+ GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai")
+ // Any Google generative AI endpoint
+ // Stability: development
+ GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai")
+ // [Vertex AI]
+ // Stability: development
+ //
+ // [Vertex AI]: https://cloud.google.com/vertex-ai
+ GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai")
+ // [Gemini]
+ // Stability: development
+ //
+ // [Gemini]: https://cloud.google.com/products/gemini
+ GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini")
+ // [Anthropic]
+ // Stability: development
+ //
+ // [Anthropic]: https://www.anthropic.com/
+ GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic")
+ // [Cohere]
+ // Stability: development
+ //
+ // [Cohere]: https://cohere.com/
+ GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere")
+ // Azure AI Inference
+ // Stability: development
+ GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference")
+ // [Azure OpenAI]
+ // Stability: development
+ //
+ // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/
+ GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai")
+ // [IBM Watsonx AI]
+ // Stability: development
+ //
+ // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai
+ GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai")
+ // [AWS Bedrock]
+ // Stability: development
+ //
+ // [AWS Bedrock]: https://aws.amazon.com/bedrock
+ GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock")
+ // [Perplexity]
+ // Stability: development
+ //
+ // [Perplexity]: https://www.perplexity.ai/
+ GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity")
+ // [xAI]
+ // Stability: development
+ //
+ // [xAI]: https://x.ai/
+ GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai")
+ // [DeepSeek]
+ // Stability: development
+ //
+ // [DeepSeek]: https://www.deepseek.com/
+ GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek")
+ // [Groq]
+ // Stability: development
+ //
+ // [Groq]: https://groq.com/
+ GenAIProviderNameGroq = GenAIProviderNameKey.String("groq")
+ // [Mistral AI]
+ // Stability: development
+ //
+ // [Mistral AI]: https://mistral.ai/
+ GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai")
+)
+
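+// Illustrative usage sketch, not part of the generated conventions: a
+// chat-style inference span could combine the operation and provider enum
+// values above with the request, response, and usage constructors. The model
+// names and token counts are hypothetical placeholders.
+func exampleGenAIChatSpanAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  GenAIOperationNameChat,
+  GenAIProviderNameOpenAI,
+  GenAIRequestModel("gpt-4"),
+  GenAIRequestTemperature(0.0),
+  GenAIRequestMaxTokens(100),
+  GenAIResponseModel("gpt-4-0613"),
+  GenAIUsageInputTokens(100),
+  GenAIUsageOutputTokens(180),
+ }
+}
+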
+// Enum values for gen_ai.token.type
+var (
+ // Input tokens (prompt, input, etc.)
+ // Stability: development
+ GenAITokenTypeInput = GenAITokenTypeKey.String("input")
+ // Output tokens (completion, response, etc.)
+ // Stability: development
+ GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
+)
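+
+// Illustrative usage (a sketch, not part of the generated conventions): the
+// enum values above are plain attribute.KeyValue values and can be set on a
+// GenAI client span, assuming span is a go.opentelemetry.io/otel/trace Span:
+//
+//	span.SetAttributes(
+//		GenAIOperationNameExecuteTool,
+//		GenAIProviderNameOpenAI,
+//		GenAIOutputTypeJSON,
+//	)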
+
+// Namespace: geo
+const (
+ // GeoContinentCodeKey is the attribute Key conforming to the
+ // "geo.continent.code" semantic conventions. It represents the two-letter code
+	// representing the continent’s name.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ GeoContinentCodeKey = attribute.Key("geo.continent.code")
+
+ // GeoCountryISOCodeKey is the attribute Key conforming to the
+ // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+ // Country Code ([ISO 3166-1 alpha2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA"
+ //
+ // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+ GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code")
+
+ // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name"
+ // semantic conventions. It represents the locality name. Represents the name of
+ // a city, town, village, or similar populated place.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Montreal", "Berlin"
+ GeoLocalityNameKey = attribute.Key("geo.locality.name")
+
+ // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat"
+ // semantic conventions. It represents the latitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 45.505918
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLatKey = attribute.Key("geo.location.lat")
+
+ // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon"
+ // semantic conventions. It represents the longitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -73.61483
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLonKey = attribute.Key("geo.location.lon")
+
+ // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code"
+ // semantic conventions. It represents the postal code associated with the
+ // location. Values appropriate for this field may also be known as a postcode
+ // or ZIP code and will vary widely from country to country.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "94040"
+ GeoPostalCodeKey = attribute.Key("geo.postal_code")
+
+ // GeoRegionISOCodeKey is the attribute Key conforming to the
+ // "geo.region.iso_code" semantic conventions. It represents the region ISO code
+ // ([ISO 3166-2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA-QC"
+ //
+ // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+ GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code")
+)
+
+// GeoCountryISOCode returns an attribute KeyValue conforming to the
+// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+// Country Code ([ISO 3166-1 alpha2]).
+//
+// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+func GeoCountryISOCode(val string) attribute.KeyValue {
+ return GeoCountryISOCodeKey.String(val)
+}
+
+// GeoLocalityName returns an attribute KeyValue conforming to the
+// "geo.locality.name" semantic conventions. It represents the locality name.
+// Represents the name of a city, town, village, or similar populated place.
+func GeoLocalityName(val string) attribute.KeyValue {
+ return GeoLocalityNameKey.String(val)
+}
+
+// GeoLocationLat returns an attribute KeyValue conforming to the
+// "geo.location.lat" semantic conventions. It represents the latitude of the geo
+// location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLat(val float64) attribute.KeyValue {
+ return GeoLocationLatKey.Float64(val)
+}
+
+// GeoLocationLon returns an attribute KeyValue conforming to the
+// "geo.location.lon" semantic conventions. It represents the longitude of the
+// geo location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLon(val float64) attribute.KeyValue {
+ return GeoLocationLonKey.Float64(val)
+}
+
+// GeoPostalCode returns an attribute KeyValue conforming to the
+// "geo.postal_code" semantic conventions. It represents the postal code
+// associated with the location. Values appropriate for this field may also be
+// known as a postcode or ZIP code and will vary widely from country to country.
+func GeoPostalCode(val string) attribute.KeyValue {
+ return GeoPostalCodeKey.String(val)
+}
+
+// GeoRegionISOCode returns an attribute KeyValue conforming to the
+// "geo.region.iso_code" semantic conventions. It represents the region ISO code
+// ([ISO 3166-2]).
+//
+// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+func GeoRegionISOCode(val string) attribute.KeyValue {
+ return GeoRegionISOCodeKey.String(val)
+}
+
+// Enum values for geo.continent.code
+var (
+ // Africa
+ // Stability: development
+ GeoContinentCodeAf = GeoContinentCodeKey.String("AF")
+ // Antarctica
+ // Stability: development
+ GeoContinentCodeAn = GeoContinentCodeKey.String("AN")
+ // Asia
+ // Stability: development
+ GeoContinentCodeAs = GeoContinentCodeKey.String("AS")
+ // Europe
+ // Stability: development
+ GeoContinentCodeEu = GeoContinentCodeKey.String("EU")
+ // North America
+ // Stability: development
+ GeoContinentCodeNa = GeoContinentCodeKey.String("NA")
+ // Oceania
+ // Stability: development
+ GeoContinentCodeOc = GeoContinentCodeKey.String("OC")
+ // South America
+ // Stability: development
+ GeoContinentCodeSa = GeoContinentCodeKey.String("SA")
+)
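+
+// Illustrative usage (a sketch, not part of the generated conventions): the
+// geo constructors above return attribute.KeyValue values that can be attached
+// to a span, assuming span is a go.opentelemetry.io/otel/trace Span:
+//
+//	span.SetAttributes(
+//		GeoCountryISOCode("CA"),
+//		GeoRegionISOCode("CA-QC"),
+//		GeoLocalityName("Montreal"),
+//		GeoLocationLat(45.505918),
+//		GeoLocationLon(-73.61483),
+//		GeoContinentCodeNa,
+//	)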
+
+// Namespace: go
+const (
+ // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type"
+ // semantic conventions. It represents the type of memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "other", "stack"
+ GoMemoryTypeKey = attribute.Key("go.memory.type")
+)
+
+// Enum values for go.memory.type
+var (
+ // Memory allocated from the heap that is reserved for stack space, whether or
+	// not it is currently in use.
+ // Stability: development
+ GoMemoryTypeStack = GoMemoryTypeKey.String("stack")
+ // Memory used by the Go runtime, excluding other categories of memory usage
+ // described in this enumeration.
+ // Stability: development
+ GoMemoryTypeOther = GoMemoryTypeKey.String("other")
+)
+
+// Namespace: graphql
+const (
+ // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document"
+ // semantic conventions. It represents the GraphQL document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: query findBookById { bookById(id: ?) { name } }
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphQLDocumentKey = attribute.Key("graphql.document")
+
+ // GraphQLOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of the
+ // operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: findBookById
+ GraphQLOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphQLOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of the
+ // operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "query", "mutation", "subscription"
+ GraphQLOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+// GraphQLDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphQLDocument(val string) attribute.KeyValue {
+ return GraphQLDocumentKey.String(val)
+}
+
+// GraphQLOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphQLOperationName(val string) attribute.KeyValue {
+ return GraphQLOperationNameKey.String(val)
+}
+
+// Enum values for graphql.operation.type
+var (
+ // GraphQL query
+ // Stability: development
+ GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query")
+ // GraphQL mutation
+ // Stability: development
+ GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ // Stability: development
+ GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription")
+)
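+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// attributes for a GraphQL execution span, assuming span is a
+// go.opentelemetry.io/otel/trace Span:
+//
+//	span.SetAttributes(
+//		GraphQLOperationTypeQuery,
+//		GraphQLOperationName("findBookById"),
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)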
+
+// Namespace: heroku
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da"
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit hash
+ // for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "e6134959463efd8966b20e75b913cafe3f5ec"
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents the
+ // time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2022-10-23T18:00:42Z"
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id"
+// semantic conventions. It represents the unique identifier for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the
+// "heroku.release.creation_timestamp" semantic conventions. It represents the
+// time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
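+
+// Illustrative usage (a sketch, not part of the generated conventions): the
+// Heroku attributes describe the running release and are typically attached to
+// the resource, e.g. with go.opentelemetry.io/otel/sdk/resource (SchemaURL is
+// assumed here to be the schema URL constant exported by this package):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+//		HerokuReleaseCommit("e6134959463efd8966b20e75b913cafe3f5ec"),
+//		HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+//	)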
+
+// Namespace: host
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is running
+ // on.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+ // level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family"
+ // semantic conventions. It represents the family or generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "PA-RISC 1.1e"
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id"
+ // semantic conventions. It represents the model identifier. It provides more
+ // granular information about the CPU, distinguishing it from other CPUs within
+ // the same family.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "9000/778/B180L"
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping"
+ // semantic conventions. It represents the stepping or core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "r1p1"
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "GenuineIntel"
+ // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX
+ // registers. Writing these to memory in this order results in a 12-character
+ // string.
+ //
+ // [CPUID]: https://wiki.osdev.org/CPUID
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be the
+ // instance_id assigned by the cloud provider. For non-containerized systems,
+ // this should be the `machine-id`. See the table below for the sources to use
+ // to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fdbf79e8af94cb7f9e8df36789187052"
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID. For
+ // Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ami-07b06b442921831e5"
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the "host.image.name"
+ // semantic conventions. It represents the name of the VM image or OS install
+ // the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905"
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version string
+ // of the VM image or host OS as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e"
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC 5952] format.
+ //
+ // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F"
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as
+ // hyphen-separated octets in uppercase hexadecimal form from most to least
+ // significant.
+ //
+ // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified hostname,
+ // or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-test"
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "n1-standard-1"
+ HostTypeKey = attribute.Key("host.type")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or generation
+// of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model identifier.
+// It provides more granular information about the CPU, distinguishing it from
+// other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use to
+// determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the "host.image.id"
+// semantic conventions. It represents the VM image ID or host OS image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM image
+// or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string of
+// the VM image or host OS as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic
+// conventions. It represents the available MAC addresses of the host, excluding
+// loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name" semantic
+// conventions. It represents the name of the host. On Unix systems, it may
+// contain what the hostname command returns, or the fully qualified hostname, or
+// another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type" semantic
+// conventions. It represents the type of host. For Cloud, this must be the
+// machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Enum values for host.arch
+var (
+ // AMD64
+ // Stability: development
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ // Stability: development
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ // Stability: development
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ // Stability: development
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ // Stability: development
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ // Stability: development
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ // Stability: development
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ // Stability: development
+ HostArchX86 = HostArchKey.String("x86")
+)
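+
+// Illustrative usage (a sketch, not part of the generated conventions): host
+// attributes are likewise typically resource attributes, combining the helper
+// functions with the host.arch enum values above (SchemaURL and the resource
+// package are assumed as in the Heroku sketch):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		HostName("opentelemetry-test"),
+//		HostID("fdbf79e8af94cb7f9e8df36789187052"),
+//		HostArchAMD64,
+//	)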
+
+// Namespace: http
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of the
+ // HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "idle"
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of the
+ // request payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP request
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GET", "POST", "HEAD"
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // [RFC9110],
+ // the PATCH method defined in [RFC5789]
+ // and the QUERY method defined in [httpbis-safe-method-w-body].
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set the
+ // `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods, it is not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to be
+ // case insensitive, SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ //
+ // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods
+ // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html
+ // [httpbis-safe-method-w-body]: https://datatracker.ietf.org/doc/draft-ietf-httpbis-safe-method-w-body/?include_text=1
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GeT", "ACL", "foo"
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues,
+ // or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size"
+ // semantic conventions. It represents the total size of the request in bytes.
+ // This should be the total number of bytes sent over the wire, including the
+ // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request
+ // body if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size of the
+ // response payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size of
+ // the response in bytes. This should be the total number of bytes sent over the
+ // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+ // headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status code].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 200
+ //
+ // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic
+ // conventions. It represents the matched route template for the request. This
+ // MUST be low-cardinality and include all static path segments, with dynamic
+ // path segments represented with placeholders.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/users/:userID?", "my-controller/my-action/{id?}"
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+ // framework as the route attribute should have low-cardinality and the URI path
+ // can NOT substitute it.
+ // SHOULD include the [application root] if there is one.
+ //
+ // A static path segment is a part of the route template with a fixed,
+ // low-cardinality value. This includes literal strings like `/users/` and
+ // placeholders that
+ // are constrained to a finite, predefined set of values, e.g. `{controller}` or
+ // `{action}`.
+ //
+ // A dynamic path segment is a placeholder for a value that can have high
+ // cardinality and is not constrained to a predefined list like static path
+ // segments.
+ //
+ // Instrumentations SHOULD use routing information provided by the corresponding
+ // web framework. They SHOULD pick the most precise source of routing
+ // information and MAY
+ // support custom route formatting. Instrumentations SHOULD document the format
+ // and the API used to obtain the route string.
+ //
+ // [application root]: /docs/http/http-spans.md#http-server-definitions
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, the key being the normalized HTTP header name (lowercase) and the
+// value being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.request.header."+key, val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions. It represents the HTTP response
+// headers, the key being the normalized HTTP header name (lowercase) and the
+// value being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.response.header."+key, val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route template for the
+// request. This MUST be low-cardinality and include all static path segments,
+// with dynamic path segments represented with placeholders.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Enum values for http.connection.state
+var (
+ // active state.
+ // Stability: development
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state.
+ // Stability: development
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+// Enum values for http.request.method
+var (
+ // CONNECT method.
+ // Stability: stable
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method.
+ // Stability: stable
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method.
+ // Stability: stable
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method.
+ // Stability: stable
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method.
+ // Stability: stable
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method.
+ // Stability: stable
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method.
+ // Stability: stable
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method.
+ // Stability: stable
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method.
+ // Stability: stable
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // QUERY method.
+ // Stability: development
+ HTTPRequestMethodQuery = HTTPRequestMethodKey.String("QUERY")
+ // Any HTTP method that the instrumentation has no prior knowledge of.
+ // Stability: stable
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
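+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// attributes for an HTTP server span, assuming span is a
+// go.opentelemetry.io/otel/trace Span (the header and size values are made up
+// for the example):
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodGet,
+//		HTTPRoute("/users/:userID?"),
+//		HTTPResponseStatusCode(200),
+//		HTTPResponseBodySize(1024),
+//		HTTPRequestHeader("content-type", "application/json"),
+//	)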
+
+// Namespace: hw
+const (
+ // HwBatteryCapacityKey is the attribute Key conforming to the
+ // "hw.battery.capacity" semantic conventions. It represents the design capacity
+	// in Watt-hours or Ampere-hours.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9.3Ah", "50Wh"
+ HwBatteryCapacityKey = attribute.Key("hw.battery.capacity")
+
+ // HwBatteryChemistryKey is the attribute Key conforming to the
+ // "hw.battery.chemistry" semantic conventions. It represents the battery
+ // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Li-ion", "NiMH"
+ //
+ // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+ HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry")
+
+ // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state"
+ // semantic conventions. It represents the current state of the battery.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwBatteryStateKey = attribute.Key("hw.battery.state")
+
+ // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version"
+ // semantic conventions. It represents the BIOS version of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ HwBiosVersionKey = attribute.Key("hw.bios_version")
+
+ // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version"
+ // semantic conventions. It represents the driver version for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.2.1-3"
+ HwDriverVersionKey = attribute.Key("hw.driver_version")
+
+ // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type"
+ // semantic conventions. It represents the type of the enclosure (useful for
+ // modular systems).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Computer", "Storage", "Switch"
+ HwEnclosureTypeKey = attribute.Key("hw.enclosure.type")
+
+ // HwFirmwareVersionKey is the attribute Key conforming to the
+ // "hw.firmware_version" semantic conventions. It represents the firmware
+ // version of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0.1"
+ HwFirmwareVersionKey = attribute.Key("hw.firmware_version")
+
+ // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic
+ // conventions. It represents the type of task the GPU is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwGpuTaskKey = attribute.Key("hw.gpu.task")
+
+ // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions.
+ // It represents an identifier for the hardware component, unique within the
+ // monitored host.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "win32battery_battery_testsysa33_1"
+ HwIDKey = attribute.Key("hw.id")
+
+ // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type"
+ // semantic conventions. It represents the type of limit for hardware
+ // components.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLimitTypeKey = attribute.Key("hw.limit_type")
+
+ // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the
+ // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+ // Level of the logical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "RAID0+1", "RAID5", "RAID10"
+ HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level")
+
+ // HwLogicalDiskStateKey is the attribute Key conforming to the
+ // "hw.logical_disk.state" semantic conventions. It represents the state of the
+ // logical disk space usage.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state")
+
+ // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type"
+ // semantic conventions. It represents the type of the memory module.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "DDR4", "DDR5", "LPDDR5"
+ HwMemoryTypeKey = attribute.Key("hw.memory.type")
+
+ // HwModelKey is the attribute Key conforming to the "hw.model" semantic
+ // conventions. It represents the descriptive model name of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery"
+ HwModelKey = attribute.Key("hw.model")
+
+ // HwNameKey is the attribute Key conforming to the "hw.name" semantic
+ // conventions. It represents an easily-recognizable name for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "eth0"
+ HwNameKey = attribute.Key("hw.name")
+
+ // HwNetworkLogicalAddressesKey is the attribute Key conforming to the
+ // "hw.network.logical_addresses" semantic conventions. It represents the
+ // logical addresses of the adapter (e.g. IP address, or WWPN).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "172.16.8.21", "57.11.193.42"
+ HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses")
+
+ // HwNetworkPhysicalAddressKey is the attribute Key conforming to the
+ // "hw.network.physical_address" semantic conventions. It represents the
+ // physical address of the adapter (e.g. MAC address, or WWNN).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00-90-F5-E9-7B-36"
+ HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address")
+
+ // HwParentKey is the attribute Key conforming to the "hw.parent" semantic
+ // conventions. It represents the unique identifier of the parent component
+ // (typically the `hw.id` attribute of the enclosure, or disk controller).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dellStorage_perc_0"
+ HwParentKey = attribute.Key("hw.parent")
+
+ // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the
+ // "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+ // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+ // of the physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate"
+ //
+ // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+ HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute")
+
+ // HwPhysicalDiskStateKey is the attribute Key conforming to the
+ // "hw.physical_disk.state" semantic conventions. It represents the state of the
+ // physical disk endurance utilization.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state")
+
+ // HwPhysicalDiskTypeKey is the attribute Key conforming to the
+ // "hw.physical_disk.type" semantic conventions. It represents the type of the
+ // physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "HDD", "SSD", "10K"
+ HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type")
+
+ // HwSensorLocationKey is the attribute Key conforming to the
+ // "hw.sensor_location" semantic conventions. It represents the location of the
+ // sensor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0
+ // V3_3", "MAIN_12V", "CPU_VCORE"
+ HwSensorLocationKey = attribute.Key("hw.sensor_location")
+
+ // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number"
+ // semantic conventions. It represents the serial number of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CNFCP0123456789"
+ HwSerialNumberKey = attribute.Key("hw.serial_number")
+
+ // HwStateKey is the attribute Key conforming to the "hw.state" semantic
+ // conventions. It represents the current state of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwStateKey = attribute.Key("hw.state")
+
+ // HwTapeDriveOperationTypeKey is the attribute Key conforming to the
+ // "hw.tape_drive.operation_type" semantic conventions. It represents the type
+ // of tape drive operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type")
+
+ // HwTypeKey is the attribute Key conforming to the "hw.type" semantic
+ // conventions. It represents the type of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+
+ // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic
+ // conventions. It represents the vendor name of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo"
+ HwVendorKey = attribute.Key("hw.vendor")
+)
+
+// HwBatteryCapacity returns an attribute KeyValue conforming to the
+// "hw.battery.capacity" semantic conventions. It represents the design capacity
+// in Watt-hours or Ampere-hours.
+func HwBatteryCapacity(val string) attribute.KeyValue {
+ return HwBatteryCapacityKey.String(val)
+}
+
+// HwBatteryChemistry returns an attribute KeyValue conforming to the
+// "hw.battery.chemistry" semantic conventions. It represents the battery
+// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+//
+// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+func HwBatteryChemistry(val string) attribute.KeyValue {
+ return HwBatteryChemistryKey.String(val)
+}
+
+// HwBiosVersion returns an attribute KeyValue conforming to the
+// "hw.bios_version" semantic conventions. It represents the BIOS version of the
+// hardware component.
+func HwBiosVersion(val string) attribute.KeyValue {
+ return HwBiosVersionKey.String(val)
+}
+
+// HwDriverVersion returns an attribute KeyValue conforming to the
+// "hw.driver_version" semantic conventions. It represents the driver version for
+// the hardware component.
+func HwDriverVersion(val string) attribute.KeyValue {
+ return HwDriverVersionKey.String(val)
+}
+
+// HwEnclosureType returns an attribute KeyValue conforming to the
+// "hw.enclosure.type" semantic conventions. It represents the type of the
+// enclosure (useful for modular systems).
+func HwEnclosureType(val string) attribute.KeyValue {
+ return HwEnclosureTypeKey.String(val)
+}
+
+// HwFirmwareVersion returns an attribute KeyValue conforming to the
+// "hw.firmware_version" semantic conventions. It represents the firmware version
+// of the hardware component.
+func HwFirmwareVersion(val string) attribute.KeyValue {
+ return HwFirmwareVersionKey.String(val)
+}
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the
+// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+// Level of the logical disk.
+func HwLogicalDiskRaidLevel(val string) attribute.KeyValue {
+ return HwLogicalDiskRaidLevelKey.String(val)
+}
+
+// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type"
+// semantic conventions. It represents the type of the memory module.
+func HwMemoryType(val string) attribute.KeyValue {
+ return HwMemoryTypeKey.String(val)
+}
+
+// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic
+// conventions. It represents the descriptive model name of the hardware
+// component.
+func HwModel(val string) attribute.KeyValue {
+ return HwModelKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the
+// "hw.network.logical_addresses" semantic conventions. It represents the logical
+// addresses of the adapter (e.g. IP address, or WWPN).
+func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue {
+ return HwNetworkLogicalAddressesKey.StringSlice(val)
+}
+
+// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the
+// "hw.network.physical_address" semantic conventions. It represents the physical
+// address of the adapter (e.g. MAC address, or WWNN).
+func HwNetworkPhysicalAddress(val string) attribute.KeyValue {
+ return HwNetworkPhysicalAddressKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the
+// "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+// of the physical disk.
+//
+// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue {
+ return HwPhysicalDiskSmartAttributeKey.String(val)
+}
+
+// HwPhysicalDiskType returns an attribute KeyValue conforming to the
+// "hw.physical_disk.type" semantic conventions. It represents the type of the
+// physical disk.
+func HwPhysicalDiskType(val string) attribute.KeyValue {
+ return HwPhysicalDiskTypeKey.String(val)
+}
+
+// HwSensorLocation returns an attribute KeyValue conforming to the
+// "hw.sensor_location" semantic conventions. It represents the location of the
+// sensor.
+func HwSensorLocation(val string) attribute.KeyValue {
+ return HwSensorLocationKey.String(val)
+}
+
+// HwSerialNumber returns an attribute KeyValue conforming to the
+// "hw.serial_number" semantic conventions. It represents the serial number of
+// the hardware component.
+func HwSerialNumber(val string) attribute.KeyValue {
+ return HwSerialNumberKey.String(val)
+}
+
+// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic
+// conventions. It represents the vendor name of the hardware component.
+func HwVendor(val string) attribute.KeyValue {
+ return HwVendorKey.String(val)
+}
+
+// Enum values for hw.battery.state
+var (
+ // Charging
+ // Stability: development
+ HwBatteryStateCharging = HwBatteryStateKey.String("charging")
+ // Discharging
+ // Stability: development
+ HwBatteryStateDischarging = HwBatteryStateKey.String("discharging")
+)
+
+// Enum values for hw.gpu.task
+var (
+ // Decoder
+ // Stability: development
+ HwGpuTaskDecoder = HwGpuTaskKey.String("decoder")
+ // Encoder
+ // Stability: development
+ HwGpuTaskEncoder = HwGpuTaskKey.String("encoder")
+ // General
+ // Stability: development
+ HwGpuTaskGeneral = HwGpuTaskKey.String("general")
+)
+
+// Enum values for hw.limit_type
+var (
+ // Critical
+ // Stability: development
+ HwLimitTypeCritical = HwLimitTypeKey.String("critical")
+ // Degraded
+ // Stability: development
+ HwLimitTypeDegraded = HwLimitTypeKey.String("degraded")
+ // High Critical
+ // Stability: development
+ HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical")
+ // High Degraded
+ // Stability: development
+ HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded")
+ // Low Critical
+ // Stability: development
+ HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical")
+ // Low Degraded
+ // Stability: development
+ HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded")
+ // Maximum
+ // Stability: development
+ HwLimitTypeMax = HwLimitTypeKey.String("max")
+ // Throttled
+ // Stability: development
+ HwLimitTypeThrottled = HwLimitTypeKey.String("throttled")
+ // Turbo
+ // Stability: development
+ HwLimitTypeTurbo = HwLimitTypeKey.String("turbo")
+)
+
+// Enum values for hw.logical_disk.state
+var (
+ // Used
+ // Stability: development
+ HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used")
+ // Free
+ // Stability: development
+ HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free")
+)
+
+// Enum values for hw.physical_disk.state
+var (
+ // Remaining
+ // Stability: development
+ HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining")
+)
+
+// Enum values for hw.state
+var (
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+ // Needs Cleaning
+ // Stability: development
+ HwStateNeedsCleaning = HwStateKey.String("needs_cleaning")
+ // OK
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Predicted Failure
+ // Stability: development
+ HwStatePredictedFailure = HwStateKey.String("predicted_failure")
+)
+
+// Enum values for hw.tape_drive.operation_type
+var (
+ // Mount
+ // Stability: development
+ HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount")
+ // Unmount
+ // Stability: development
+ HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount")
+ // Clean
+ // Stability: development
+ HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
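+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// hardware attributes are commonly recorded on metrics, e.g. with a
+// go.opentelemetry.io/otel/metric Float64Gauge (gauge and ctx are assumed to
+// be in scope):
+//
+//	gauge.Record(ctx, 1, metric.WithAttributes(
+//		HwTypeBattery,
+//		HwID("win32battery_battery_testsysa33_1"),
+//		HwStateOk,
+//	))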
+
+// Namespace: ios
+const (
+ // IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+	// semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The iOS lifecycle states are defined in the
+ // [UIApplicationDelegate documentation], from which the `OS terminology`
+ // column values are derived.
+ //
+ // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate
+ IOSAppStateKey = attribute.Key("ios.app.state")
+)
+
+// Enum values for ios.app.state
+var (
+ // The app has become `active`. Associated with UIKit notification
+ // `applicationDidBecomeActive`.
+ //
+ // Stability: development
+ IOSAppStateActive = IOSAppStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification
+ // `applicationWillResignActive`.
+ //
+ // Stability: development
+ IOSAppStateInactive = IOSAppStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit
+ // notification `applicationDidEnterBackground`.
+ //
+ // Stability: development
+ IOSAppStateBackground = IOSAppStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit
+ // notification `applicationWillEnterForeground`.
+ //
+ // Stability: development
+ IOSAppStateForeground = IOSAppStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification
+ // `applicationWillTerminate`.
+ //
+ // Stability: development
+ IOSAppStateTerminate = IOSAppStateKey.String("terminate")
+)
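+
+// Usage sketch (illustrative only): a lifecycle instrumentation could record
+// the current ios.app.state enum value directly on a span. Here `span` is
+// assumed to be a trace.Span obtained elsewhere.
+//
+//    span.SetAttributes(IOSAppStateBackground)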
+
+// Namespace: jsonrpc
+const (
+ // JSONRPCProtocolVersionKey is the attribute Key conforming to the
+ // "jsonrpc.protocol.version" semantic conventions. It represents the protocol
+ // version, as specified in the `jsonrpc` property of the request and its
+ // corresponding response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ JSONRPCProtocolVersionKey = attribute.Key("jsonrpc.protocol.version")
+
+ // JSONRPCRequestIDKey is the attribute Key conforming to the
+ // "jsonrpc.request.id" semantic conventions. It represents a string
+ // representation of the `id` property of the request and its corresponding
+ // response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7"
+ // Note: Under the [JSON-RPC specification], the `id` property may be a string,
+ // number, null, or omitted entirely. When omitted, the request is treated as a
+ // notification. Using `null` is not equivalent to omitting the `id`, but it is
+ // discouraged.
+ // Instrumentations SHOULD NOT capture this attribute when the `id` is `null` or
+ // omitted.
+ //
+ // [JSON-RPC specification]: https://www.jsonrpc.org/specification
+ JSONRPCRequestIDKey = attribute.Key("jsonrpc.request.id")
+)
+
+// JSONRPCProtocolVersion returns an attribute KeyValue conforming to the
+// "jsonrpc.protocol.version" semantic conventions. It represents the protocol
+// version, as specified in the `jsonrpc` property of the request and its
+// corresponding response.
+func JSONRPCProtocolVersion(val string) attribute.KeyValue {
+ return JSONRPCProtocolVersionKey.String(val)
+}
+
+// JSONRPCRequestID returns an attribute KeyValue conforming to the
+// "jsonrpc.request.id" semantic conventions. It represents a string
+// representation of the `id` property of the request and its corresponding
+// response.
+func JSONRPCRequestID(val string) attribute.KeyValue {
+ return JSONRPCRequestIDKey.String(val)
+}
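+
+// Usage sketch (illustrative only): annotating a JSON-RPC client span with the
+// helpers above. `span` is assumed to be a trace.Span; the literal values
+// mirror the documented examples.
+//
+//    span.SetAttributes(
+//        JSONRPCProtocolVersion("2.0"),
+//        JSONRPCRequestID("request-7"),
+//    )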
+
+// Namespace: k8s
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name"
+ // semantic conventions. It represents the name of the cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry-cluster"
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid"
+ // semantic conventions. It represents a pseudo-ID for the cluster, set to the
+ // UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8s cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8s ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T X.667].
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // > different from all other UUIDs generated before 3603 A.D., or is
+ // > extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ //
+ // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification. It must be unique within a Pod; the
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "redis"
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the number
+ // of times the container was restarted. This attribute can be used to identify
+ // a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples:
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to
+ // the "k8s.container.status.last_terminated_reason" semantic conventions. It
+ // represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Evicted", "Error"
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SContainerStatusReasonKey is the attribute Key conforming to the
+ // "k8s.container.status.reason" semantic conventions. It represents the reason
+ // for the container state. Corresponds to the `reason` field of the:
+ // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ContainerCreating", "CrashLoopBackOff",
+ // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff",
+ // "OOMKilled", "Completed", "Error", "ContainerCannotRun"
+ //
+ // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core
+ // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core
+ K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason")
+
+ // K8SContainerStatusStateKey is the attribute Key conforming to the
+ // "k8s.container.status.state" semantic conventions. It represents the state of
+ // the container. [K8s ContainerState].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "terminated", "running", "waiting"
+ //
+ // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core
+ K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
+ // semantic conventions. It represents the name of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry"
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid"
+ // semantic conventions. It represents the UID of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry"
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid"
+ // semantic conventions. It represents the UID of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry"
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SHPAMetricTypeKey is the attribute Key conforming to the
+ // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+ // source for the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Resource", "ContainerResource"
+ // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA.
+ K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type")
+
+ // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
+ // conventions. It represents the name of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SHPANameKey = attribute.Key("k8s.hpa.name")
+
+ // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+ // API version of the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "apps/v1", "autoscaling/v2"
+ // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA
+ // spec.
+ K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version")
+
+ // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Deployment", "StatefulSet"
+ // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind")
+
+ // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-deployment", "my-statefulset"
+ // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name")
+
+ // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic
+ // conventions. It represents the UID of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SHPAUIDKey = attribute.Key("k8s.hpa.uid")
+
+ // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size"
+ // semantic conventions. It represents the size (identifier) of the K8s huge
+ // page.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2Mi"
+ K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic
+ // conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry"
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic
+ // conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "default"
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNamespacePhaseKey is the attribute Key conforming to the
+ // "k8s.namespace.phase" semantic conventions. It represents the phase of the
+ // K8s namespace.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "terminating"
+ // Note: This attribute aligns with the `phase` field of the
+ // [K8s NamespaceStatus]
+ //
+ // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core
+ K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase")
+
+ // K8SNodeConditionStatusKey is the attribute Key conforming to the
+ // "k8s.node.condition.status" semantic conventions. It represents the status of
+ // the condition, one of True, False, Unknown.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "true", "false", "unknown"
+ // Note: This attribute aligns with the `status` field of the
+ // [NodeCondition]
+ //
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status")
+
+ // K8SNodeConditionTypeKey is the attribute Key conforming to the
+ // "k8s.node.condition.type" semantic conventions. It represents the condition
+ // type of a K8s Node.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Ready", "DiskPressure"
+ // Note: K8s Node conditions as described
+ // by [K8s documentation].
+ //
+ // This attribute aligns with the `type` field of the
+ // [NodeCondition]
+ //
+ // The set of possible values is not limited to those listed here. Managed
+ // Kubernetes environments or custom controllers MAY introduce additional
+ // node condition types.
+ // When this occurs, the exact value as reported by the Kubernetes API SHOULD be
+ // used.
+ //
+ // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "node-1"
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic
+ // conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2"
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodHostnameKey is the attribute Key conforming to the "k8s.pod.hostname"
+ // semantic conventions. It represents the hostname of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "collector-gateway"
+ // Note: The K8s Pod spec has an optional hostname field, which can be used to
+ // specify a hostname.
+ // Refer to [K8s docs]
+ // for more information about this field.
+ //
+ // This attribute aligns with the `hostname` field of the
+ // [K8s PodSpec].
+ //
+ // [K8s docs]: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-hostname-and-subdomain-field
+ // [K8s PodSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#podspec-v1-core
+ K8SPodHostnameKey = attribute.Key("k8s.pod.hostname")
+
+ // K8SPodIPKey is the attribute Key conforming to the "k8s.pod.ip" semantic
+ // conventions. It represents the IP address allocated to the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "172.18.0.2"
+ // Note: This attribute aligns with the `podIP` field of the
+ // [K8s PodStatus].
+ //
+ // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#podstatus-v1-core
+ K8SPodIPKey = attribute.Key("k8s.pod.ip")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic
+ // conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry-pod-autoconf"
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodStartTimeKey is the attribute Key conforming to the
+ // "k8s.pod.start_time" semantic conventions. It represents the start timestamp
+ // of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "2025-12-04T08:41:03Z"
+ // Note: Date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ //
+ // This attribute aligns with the `startTime` field of the
+ // [K8s PodStatus],
+ // in ISO 8601 (RFC 3339 compatible) format.
+ //
+ // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.34/#podstatus-v1-core
+ K8SPodStartTimeKey = attribute.Key("k8s.pod.start_time")
+
+ // K8SPodStatusPhaseKey is the attribute Key conforming to the
+ // "k8s.pod.status.phase" semantic conventions. It represents the phase for the
+ // pod. Corresponds to the `phase` field of the: [K8s PodStatus].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Pending", "Running"
+ //
+ // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core
+ K8SPodStatusPhaseKey = attribute.Key("k8s.pod.status.phase")
+
+ // K8SPodStatusReasonKey is the attribute Key conforming to the
+ // "k8s.pod.status.reason" semantic conventions. It represents the reason for
+ // the pod state. Corresponds to the `reason` field of the: [K8s PodStatus].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Evicted", "NodeAffinity"
+ //
+ // [K8s PodStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podstatus-v1-core
+ K8SPodStatusReasonKey = attribute.Key("k8s.pod.status.reason")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic
+ // conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry"
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicationControllerNameKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.name" semantic conventions. It represents the name
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name")
+
+ // K8SReplicationControllerUIDKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid")
+
+ // K8SResourceQuotaNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.name" semantic conventions. It represents the name of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name")
+
+ // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.resource_name" semantic conventions. It represents the
+ // name of the K8s resource a resource quota defines.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "count/replicationcontrollers"
+ // Note: The value for this attribute can be either the full
+ // `count/<resource>[.<group>]` string (e.g., count/deployments.apps,
+ // count/pods), or, for certain core Kubernetes resources, just the resource
+ // name (e.g., pods, services, configmaps). Both forms are supported by
+ // Kubernetes for object count quotas. See
+ // [Kubernetes Resource Quotas documentation] for more details.
+ //
+ // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#quota-on-object-count
+ K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name")
+
+ // K8SResourceQuotaUIDKey is the attribute Key conforming to the
+ // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "opentelemetry"
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Alpha
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStorageclassNameKey is the attribute Key conforming to the
+ // "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+ // [StorageClass] object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gold.storageclass.storage.k8s.io"
+ //
+ // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+ K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name")
+
+ // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
+ // semantic conventions. It represents the name of the K8s volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "volume0"
+ K8SVolumeNameKey = attribute.Key("k8s.volume.name")
+
+ // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type"
+ // semantic conventions. It represents the type of the K8s volume.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification. It must be unique within a Pod; the
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `<key>` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `<key>` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.label."+key, val)
+}
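+
+// Usage sketch (illustrative only): the annotation and label helpers take the
+// dynamic `<key>` part as a separate argument and build the full attribute key
+// from it. The label name and value below are made up for the example.
+//
+//    // Produces the attribute "k8s.cronjob.label.app" = "scheduler".
+//    kv := K8SCronJobLabel("app", "scheduler")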
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// placed on the DaemonSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.annotation."+key, val)
+}
+
+// K8SDaemonSetLabel returns an attribute KeyValue conforming to the
+// "k8s.daemonset.label" semantic conventions. It represents the label placed on
+// the DaemonSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDaemonSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.label."+key, val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the
+// "k8s.deployment.annotation" semantic conventions. It represents the annotation
+// placed on the Deployment, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.annotation."+key, val)
+}
+
+// K8SDeploymentLabel returns an attribute KeyValue conforming to the
+// "k8s.deployment.label" semantic conventions. It represents the label placed on
+// the Deployment, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDeploymentLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.label."+key, val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SHPAMetricType returns an attribute KeyValue conforming to the
+// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+// source for the horizontal pod autoscaler.
+func K8SHPAMetricType(val string) attribute.KeyValue {
+ return K8SHPAMetricTypeKey.String(val)
+}
+
+// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
+// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue {
+ return K8SHPANameKey.String(val)
+}
+
+// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+// API version of the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefAPIVersionKey.String(val)
+}
+
+// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefKind(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefKindKey.String(val)
+}
+
+// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefName(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefNameKey.String(val)
+}
+
+// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
+// semantic conventions. It represents the UID of the horizontal pod autoscaler.
+func K8SHPAUID(val string) attribute.KeyValue {
+ return K8SHPAUIDKey.String(val)
+}
+
+// K8SHugepageSize returns an attribute KeyValue conforming to the
+// "k8s.hugepage.size" semantic conventions. It represents the size (identifier)
+// of the K8s huge page.
+func K8SHugepageSize(val string) attribute.KeyValue {
+ return K8SHugepageSizeKey.String(val)
+}
+
+// K8SJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.job.annotation" semantic conventions. It represents the annotation placed
+// on the Job, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.annotation."+key, val)
+}
+
+// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label"
+// semantic conventions. It represents the label placed on the Job, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.label."+key, val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the
+// "k8s.namespace.annotation" semantic conventions. It represents the annotation
+// placed on the Namespace, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.annotation."+key, val)
+}
+
+// K8SNamespaceLabel returns an attribute KeyValue conforming to the
+// "k8s.namespace.label" semantic conventions. It represents the label placed on
+// the Namespace, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.label."+key, val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. It represents the annotation
+// placed on the Node, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.label."+key, val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodHostname returns an attribute KeyValue conforming to the
+// "k8s.pod.hostname" semantic conventions. It represents the specifies the
+// hostname of the Pod.
+func K8SPodHostname(val string) attribute.KeyValue {
+ return K8SPodHostnameKey.String(val)
+}
+
+// K8SPodIP returns an attribute KeyValue conforming to the "k8s.pod.ip" semantic
+// conventions. It represents the IP address allocated to the Pod.
+func K8SPodIP(val string) attribute.KeyValue {
+ return K8SPodIPKey.String(val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.label."+key, val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodStartTime returns an attribute KeyValue conforming to the
+// "k8s.pod.start_time" semantic conventions. It represents the start timestamp
+// of the Pod.
+func K8SPodStartTime(val string) attribute.KeyValue {
+ return K8SPodStartTimeKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// placed on the ReplicaSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label placed on
+// the ReplicaSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.label."+key, val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue {
+ return K8SReplicationControllerNameKey.String(val)
+}
+
+// K8SReplicationControllerUID returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of
+// the replication controller.
+func K8SReplicationControllerUID(val string) attribute.KeyValue {
+ return K8SReplicationControllerUIDKey.String(val)
+}
+
+// K8SResourceQuotaName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.name" semantic conventions. It represents the name of the
+// resource quota.
+func K8SResourceQuotaName(val string) attribute.KeyValue {
+ return K8SResourceQuotaNameKey.String(val)
+}
+
+// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.resource_name" semantic conventions. It represents the name
+// of the K8s resource a resource quota defines.
+func K8SResourceQuotaResourceName(val string) attribute.KeyValue {
+ return K8SResourceQuotaResourceNameKey.String(val)
+}
+
+// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+// resource quota.
+func K8SResourceQuotaUID(val string) attribute.KeyValue {
+ return K8SResourceQuotaUIDKey.String(val)
+}
+
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation placed on the StatefulSet, the `<key>` being the annotation name,
+// the value being the annotation value, even if the value is empty.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label placed
+// on the StatefulSet, the `<key>` being the label name, the value being the
+// label value, even if the value is empty.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.label."+key, val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStorageclassName returns an attribute KeyValue conforming to the
+// "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+// [StorageClass] object.
+//
+// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+func K8SStorageclassName(val string) attribute.KeyValue {
+ return K8SStorageclassNameKey.String(val)
+}
+
+// K8SVolumeName returns an attribute KeyValue conforming to the
+// "k8s.volume.name" semantic conventions. It represents the name of the K8s
+// volume.
+func K8SVolumeName(val string) attribute.KeyValue {
+ return K8SVolumeNameKey.String(val)
+}
+
+// Enum values for k8s.container.status.reason
+var (
+ // The container is being created.
+ // Stability: development
+ K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating")
+ // The container is in a crash loop back off state.
+ // Stability: development
+ K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff")
+ // There was an error creating the container configuration.
+ // Stability: development
+ K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError")
+ // There was an error pulling the container image.
+ // Stability: development
+ K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull")
+ // The container image pull is in back off state.
+ // Stability: development
+ K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff")
+ // The container was killed due to out of memory.
+ // Stability: development
+ K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled")
+ // The container has completed execution.
+ // Stability: development
+ K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed")
+ // There was an error with the container.
+ // Stability: development
+ K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error")
+ // The container cannot run.
+ // Stability: development
+ K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun")
+)
+
+// Enum values for k8s.container.status.state
+var (
+ // The container has terminated.
+ // Stability: development
+ K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated")
+ // The container is running.
+ // Stability: development
+ K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running")
+ // The container is waiting.
+ // Stability: development
+ K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting")
+)
+
+// Enum values for k8s.namespace.phase
+var (
+ // Active namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active")
+ // Terminating namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating")
+)
+
+// Enum values for k8s.node.condition.status
+var (
+ // condition_true
+ // Stability: development
+ K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true")
+ // condition_false
+ // Stability: development
+ K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false")
+ // condition_unknown
+ // Stability: development
+ K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown")
+)
+
+// Enum values for k8s.node.condition.type
+var (
+ // The node is healthy and ready to accept pods
+ // Stability: development
+ K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready")
+ // Pressure exists on the disk size—that is, if the disk capacity is low
+ // Stability: development
+ K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure")
+ // Pressure exists on the node memory—that is, if the node memory is low
+ // Stability: development
+ K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure")
+ // Pressure exists on the processes—that is, if there are too many processes
+ // on the node
+ // Stability: development
+ K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure")
+ // The network for the node is not correctly configured
+ // Stability: development
+ K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable")
+)
+
+// Enum values for k8s.pod.status.phase
+var (
+ // The pod has been accepted by the system, but one or more of the containers
+ // has not been started. This includes time before being bound to a node, as
+ // well as time spent pulling images onto the host.
+ //
+ // Stability: development
+ K8SPodStatusPhasePending = K8SPodStatusPhaseKey.String("Pending")
+ // The pod has been bound to a node and all of the containers have been started.
+ // At least one container is still running or is in the process of being
+ // restarted.
+ //
+ // Stability: development
+ K8SPodStatusPhaseRunning = K8SPodStatusPhaseKey.String("Running")
+ // All containers in the pod have voluntarily terminated with a container exit
+ // code of 0, and the system is not going to restart any of these containers.
+ //
+ // Stability: development
+ K8SPodStatusPhaseSucceeded = K8SPodStatusPhaseKey.String("Succeeded")
+ // All containers in the pod have terminated, and at least one container has
+ // terminated in a failure (exited with a non-zero exit code or was stopped by
+ // the system).
+ //
+ // Stability: development
+ K8SPodStatusPhaseFailed = K8SPodStatusPhaseKey.String("Failed")
+ // For some reason the state of the pod could not be obtained, typically due to
+ // an error in communicating with the host of the pod.
+ //
+ // Stability: development
+ K8SPodStatusPhaseUnknown = K8SPodStatusPhaseKey.String("Unknown")
+)
+
+// Enum values for k8s.pod.status.reason
+var (
+ // The pod is evicted.
+ // Stability: development
+ K8SPodStatusReasonEvicted = K8SPodStatusReasonKey.String("Evicted")
+ // The pod is in a status because of its node affinity
+ // Stability: development
+ K8SPodStatusReasonNodeAffinity = K8SPodStatusReasonKey.String("NodeAffinity")
+ // The reason on a pod when its state cannot be confirmed as kubelet is
+ // unresponsive on the node it is (was) running.
+ //
+ // Stability: development
+ K8SPodStatusReasonNodeLost = K8SPodStatusReasonKey.String("NodeLost")
+ // The node is shutdown
+ // Stability: development
+ K8SPodStatusReasonShutdown = K8SPodStatusReasonKey.String("Shutdown")
+ // The pod was rejected admission to the node because of an error during
+ // admission that could not be categorized.
+ //
+ // Stability: development
+ K8SPodStatusReasonUnexpectedAdmissionError = K8SPodStatusReasonKey.String("UnexpectedAdmissionError")
+)
+
+// Enum values for k8s.volume.type
+var (
+ // A [persistentVolumeClaim] volume
+ // Stability: development
+ //
+ // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim
+ K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim")
+ // A [configMap] volume
+ // Stability: development
+ //
+ // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap
+ K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap")
+ // A [downwardAPI] volume
+ // Stability: development
+ //
+ // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi
+ K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI")
+ // An [emptyDir] volume
+ // Stability: development
+ //
+ // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir
+ K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir")
+ // A [secret] volume
+ // Stability: development
+ //
+ // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret
+ K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret")
+ // A [local] volume
+ // Stability: development
+ //
+ // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local
+ K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local")
+)
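+
+// Usage sketch (illustrative only): describing a pod volume by combining the
+// K8SVolumeName helper with one of the k8s.volume.type enum values above. The
+// volume name mirrors the documented example.
+//
+//    volumeAttrs := []attribute.KeyValue{
+//        K8SVolumeName("volume0"),
+//        K8SVolumeTypeEmptyDir,
+//    }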
+
+// Namespace: log
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "audit.log"
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the basename of
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "uuid.log"
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/log/mysql/audit.log"
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full path to
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/lib/docker/uuid.log"
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic
+ // conventions. It represents the stream associated with the log. See below for
+ // a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ LogIostreamKey = attribute.Key("log.iostream")
+
+ // LogRecordOriginalKey is the attribute Key conforming to the
+ // "log.record.original" semantic conventions. It represents the complete
+ // original Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - -
+ // Something happened", "[INFO] 8/3/24 12:34:56 Something happened"
+ // Note: This value MAY be added when processing a Log Record which was
+ // originally transmitted as a string or equivalent data type AND the Body field
+ // of the Log Record does not contain the same value. (e.g. a syslog or a log
+ // record read from a file.)
+ LogRecordOriginalKey = attribute.Key("log.record.original")
+
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV"
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
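+
+// Illustrative usage (not part of the generated conventions): a minimal sketch
+// of how the log.* attributes above might be grouped before being attached to a
+// log record or span through the caller's chosen API. The file names and the
+// attrs variable are hypothetical.
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//		LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
+//	}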
+
+// Namespace: mainframe
+const (
+ // MainframeLparNameKey is the attribute Key conforming to the
+ // "mainframe.lpar.name" semantic conventions. It represents the name of the
+ // logical partition that hosts systems with a mainframe operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "LPAR01"
+ MainframeLparNameKey = attribute.Key("mainframe.lpar.name")
+)
+
+// MainframeLparName returns an attribute KeyValue conforming to the
+// "mainframe.lpar.name" semantic conventions. It represents the name of the
+// logical partition that hosts systems with a mainframe operating system.
+func MainframeLparName(val string) attribute.KeyValue {
+ return MainframeLparNameKey.String(val)
+}
+
+// Namespace: mcp
+const (
+ // McpMethodNameKey is the attribute Key conforming to the "mcp.method.name"
+ // semantic conventions. It represents the name of the request or notification
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ McpMethodNameKey = attribute.Key("mcp.method.name")
+
+ // McpProtocolVersionKey is the attribute Key conforming to the
+ // "mcp.protocol.version" semantic conventions. It represents the [version] of
+ // the Model Context Protocol used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2025-06-18"
+ //
+ // [version]: https://modelcontextprotocol.io/specification/versioning
+ McpProtocolVersionKey = attribute.Key("mcp.protocol.version")
+
+ // McpResourceURIKey is the attribute Key conforming to the "mcp.resource.uri"
+ // semantic conventions. It represents the value of the resource uri.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "postgres://database/customers/schema",
+ // "file:///home/user/documents/report.pdf"
+ // Note: This is a URI of the resource provided in the following requests or
+ // notifications: `resources/read`, `resources/subscribe`,
+ // `resources/unsubscribe`, or `notifications/resources/updated`.
+ McpResourceURIKey = attribute.Key("mcp.resource.uri")
+
+ // McpSessionIDKey is the attribute Key conforming to the "mcp.session.id"
+ // semantic conventions. It identifies the [MCP session].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "191c4850af6c49e08843a3f6c80e5046"
+ //
+ // [MCP session]: https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#session-management
+ McpSessionIDKey = attribute.Key("mcp.session.id")
+)
+
+// McpProtocolVersion returns an attribute KeyValue conforming to the
+// "mcp.protocol.version" semantic conventions. It represents the [version] of
+// the Model Context Protocol used.
+//
+// [version]: https://modelcontextprotocol.io/specification/versioning
+func McpProtocolVersion(val string) attribute.KeyValue {
+ return McpProtocolVersionKey.String(val)
+}
+
+// McpResourceURI returns an attribute KeyValue conforming to the
+// "mcp.resource.uri" semantic conventions. It represents the value of the
+// resource uri.
+func McpResourceURI(val string) attribute.KeyValue {
+ return McpResourceURIKey.String(val)
+}
+
+// McpSessionID returns an attribute KeyValue conforming to the "mcp.session.id"
+// semantic conventions. It identifies the [MCP session].
+//
+// [MCP session]: https://modelcontextprotocol.io/specification/2025-06-18/basic/transports#session-management
+func McpSessionID(val string) attribute.KeyValue {
+ return McpSessionIDKey.String(val)
+}
+
+// Enum values for mcp.method.name
+var (
+ // Notification cancelling a previously-issued request.
+ //
+ // Stability: development
+ McpMethodNameNotificationsCancelled = McpMethodNameKey.String("notifications/cancelled")
+ // Request to initialize the MCP client.
+ //
+ // Stability: development
+ McpMethodNameInitialize = McpMethodNameKey.String("initialize")
+ // Notification indicating that the MCP client has been initialized.
+ //
+ // Stability: development
+ McpMethodNameNotificationsInitialized = McpMethodNameKey.String("notifications/initialized")
+ // Notification indicating the progress for a long-running operation.
+ //
+ // Stability: development
+ McpMethodNameNotificationsProgress = McpMethodNameKey.String("notifications/progress")
+ // Request to check that the other party is still alive.
+ //
+ // Stability: development
+ McpMethodNamePing = McpMethodNameKey.String("ping")
+ // Request to list resources available on server.
+ //
+ // Stability: development
+ McpMethodNameResourcesList = McpMethodNameKey.String("resources/list")
+ // Request to list resource templates available on server.
+ //
+ // Stability: development
+ McpMethodNameResourcesTemplatesList = McpMethodNameKey.String("resources/templates/list")
+ // Request to read a resource.
+ //
+ // Stability: development
+ McpMethodNameResourcesRead = McpMethodNameKey.String("resources/read")
+ // Notification indicating that the list of resources has changed.
+ //
+ // Stability: development
+ McpMethodNameNotificationsResourcesListChanged = McpMethodNameKey.String("notifications/resources/list_changed")
+ // Request to subscribe to a resource.
+ //
+ // Stability: development
+ McpMethodNameResourcesSubscribe = McpMethodNameKey.String("resources/subscribe")
+ // Request to unsubscribe from resource updates.
+ //
+ // Stability: development
+ McpMethodNameResourcesUnsubscribe = McpMethodNameKey.String("resources/unsubscribe")
+ // Notification indicating that a resource has been updated.
+ //
+ // Stability: development
+ McpMethodNameNotificationsResourcesUpdated = McpMethodNameKey.String("notifications/resources/updated")
+ // Request to list prompts available on server.
+ //
+ // Stability: development
+ McpMethodNamePromptsList = McpMethodNameKey.String("prompts/list")
+ // Request to get a prompt.
+ //
+ // Stability: development
+ McpMethodNamePromptsGet = McpMethodNameKey.String("prompts/get")
+ // Notification indicating that the list of prompts has changed.
+ //
+ // Stability: development
+ McpMethodNameNotificationsPromptsListChanged = McpMethodNameKey.String("notifications/prompts/list_changed")
+ // Request to list tools available on server.
+ //
+ // Stability: development
+ McpMethodNameToolsList = McpMethodNameKey.String("tools/list")
+ // Request to call a tool.
+ //
+ // Stability: development
+ McpMethodNameToolsCall = McpMethodNameKey.String("tools/call")
+ // Notification indicating that the list of tools has changed.
+ //
+ // Stability: development
+ McpMethodNameNotificationsToolsListChanged = McpMethodNameKey.String("notifications/tools/list_changed")
+ // Request to set the logging level.
+ //
+ // Stability: development
+ McpMethodNameLoggingSetLevel = McpMethodNameKey.String("logging/setLevel")
+ // Notification indicating that a message has been received.
+ //
+ // Stability: development
+ McpMethodNameNotificationsMessage = McpMethodNameKey.String("notifications/message")
+ // Request to create a sampling message.
+ //
+ // Stability: development
+ McpMethodNameSamplingCreateMessage = McpMethodNameKey.String("sampling/createMessage")
+ // Request to complete a prompt.
+ //
+ // Stability: development
+ McpMethodNameCompletionComplete = McpMethodNameKey.String("completion/complete")
+ // Request to list roots available on server.
+ //
+ // Stability: development
+ McpMethodNameRootsList = McpMethodNameKey.String("roots/list")
+ // Notification indicating that the list of roots has changed.
+ //
+ // Stability: development
+ McpMethodNameNotificationsRootsListChanged = McpMethodNameKey.String("notifications/roots/list_changed")
+ // Request from the server to elicit additional information from the user via
+ // the client.
+ //
+ // Stability: development
+ McpMethodNameElicitationCreate = McpMethodNameKey.String("elicitation/create")
+)
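+
+// Illustrative usage (not part of the generated conventions): a minimal sketch
+// of a span recorded around an MCP "tools/call" request, annotated with the
+// mcp.* attributes above. It assumes a trace.Tracer from
+// go.opentelemetry.io/otel/trace; tracer, ctx, and the session id are
+// hypothetical.
+//
+//	ctx, span := tracer.Start(ctx, "tools/call",
+//		trace.WithAttributes(
+//			McpMethodNameToolsCall,
+//			McpProtocolVersion("2025-06-18"),
+//			McpSessionID("191c4850af6c49e08843a3f6c80e5046"),
+//		),
+//	)
+//	defer span.End()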
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "client-5", "myhost@8742@s8083jm"
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingConsumerGroupNameKey is the attribute Key conforming to the
+ // "messaging.consumer.group.name" semantic conventions. It represents the name
+ // of the consumer group with which a consumer is associated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-group", "indexer"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.consumer.group.name` is applicable and what it means in
+ // the context of that system.
+ MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the message
+ // destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MyQueue", "MyTopic"
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If
+ // the broker doesn't have such a notion, the destination name SHOULD uniquely
+ // identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to the
+ // "messaging.destination.partition.id" semantic conventions. It represents the
+ // identifier of the partition messages are sent to or received from, unique
+ // within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to
+ // the "messaging.destination.subscription.name" semantic conventions. It
+ // represents the name of the destination subscription from which a message is
+ // consumed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "subscription-a"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.destination.subscription.name` is applicable and what it
+ // means in the context of that system.
+ MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/customers/{customerId}"
+ // Note: Destination names could be constructed from templates. An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the message
+ // key in Kafka, which is used for grouping alike messages to ensure they're
+ // processed on the same partition. It differs from `messaging.message.id` in
+ // that it is not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or uncompressed body size. If
+ // both sizes are known, the uncompressed body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or uncompressed size. If both
+ // sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the rabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+ // represents the delay time level for delay message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+ // expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents the
+ // message group, which is essential for FIFO messages. Messages that belong to
+ // the same message group are always processed one by one within the same
+ // consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents the
+ // key(s) of the message, another way to mark the message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+ // describes the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to the
+// "messaging.batch.message_count" semantic conventions. It represents the number
+// of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique identifier
+// for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingConsumerGroupName returns an attribute KeyValue conforming to the
+// "messaging.consumer.group.name" semantic conventions. It represents the name
+// of the consumer group with which a consumer is associated.
+func MessagingConsumerGroupName(val string) attribute.KeyValue {
+ return MessagingConsumerGroupNameKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the
+// "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be unnamed
+// or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming to
+// the "messaging.destination.partition.id" semantic conventions. It represents
+// the identifier of the partition messages are sent to or received from, unique
+// within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming
+// to the "messaging.destination.subscription.name" semantic conventions. It
+// represents the name of the destination subscription from which a message is
+// consumed.
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to the
+// "messaging.destination.template" semantic conventions. It represents the low
+// cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to the
+// "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka are used for grouping alike messages to ensure they're processed
+// on the same partition. They differ from `messaging.message.id` in that they're
+// not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the rabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the it
+// is essential for FIFO message. Messages that belong to the same message group
+// are always processed one by one within the same consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark the message besides the message id.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+// Enum values for messaging.rocketmq.consumption_model
+var (
+ // Clustering consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting")
+)
+
+// Enum values for messaging.rocketmq.message.type
+var (
+ // Normal message
+ // Stability: development
+ MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal")
+ // FIFO message
+ // Stability: development
+ MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo")
+ // Delay message
+ // Stability: development
+ MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay")
+ // Transaction message
+ // Stability: development
+ MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction")
+)
+
+// Enum values for messaging.servicebus.disposition_status
+var (
+ // Message is completed
+ // Stability: development
+ MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ // Stability: development
+ MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ // Stability: development
+ MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ // Stability: development
+ MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer")
+)
+
+// Enum values for messaging.system
+var (
+ // Apache ActiveMQ
+ // Stability: development
+ MessagingSystemActiveMQ = MessagingSystemKey.String("activemq")
+ // Amazon Simple Notification Service (SNS)
+ // Stability: development
+ MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns")
+ // Amazon Simple Queue Service (SQS)
+ // Stability: development
+ MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ // Stability: development
+ MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ // Stability: development
+ MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ // Stability: development
+ MessagingSystemServiceBus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ // Stability: development
+ MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ // Stability: development
+ MessagingSystemJMS = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ // Stability: development
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ // Stability: development
+ MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ // Stability: development
+ MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq")
+ // Apache Pulsar
+ // Stability: development
+ MessagingSystemPulsar = MessagingSystemKey.String("pulsar")
+)
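+
+// Illustrative usage (not part of the generated conventions): a minimal sketch
+// of a producer span annotated with the messaging.* attributes above. It assumes
+// a trace.Tracer from go.opentelemetry.io/otel/trace; tracer, ctx, and the
+// destination are hypothetical.
+//
+//	ctx, span := tracer.Start(ctx, "send MyQueue",
+//		trace.WithSpanKind(trace.SpanKindProducer),
+//		trace.WithAttributes(
+//			MessagingSystemKafka,
+//			MessagingOperationTypeSend,
+//			MessagingOperationName("send"),
+//			MessagingDestinationName("MyQueue"),
+//			MessagingDestinationPartitionID("1"),
+//		),
+//	)
+//	defer span.End()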
+
+// Namespace: network
+const (
+ // NetworkCarrierICCKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+ // network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+ // Note: Connection states are defined as part of the [rfc9293]
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It describes more details
+ // regarding the connection.type. It may be the type of
+ // cell technology connection, but it could be used for describing details about
+ // a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port"
+ // semantic conventions. It represents the peer port number of the network
+ // connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the
+ // [OSI application layer] or non-OSI equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "amqp", "http", "mqtt"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the actual
+ // version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.1", "2"
+ // Note: If protocol version is subject to negotiation (for example using
+ // [ALPN]), this attribute SHOULD be set to the negotiated version. If the
+ // actual protocol version is not known, this attribute SHOULD NOT be set.
+ //
+ // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the
+ // [OSI transport layer] or [inter-process communication method].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "tcp", "udp"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port 12345.
+ //
+ // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer
+ // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic
+ // conventions. It represents the [OSI network layer] or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "ipv4", "ipv6"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI network layer]: https://wikipedia.org/wiki/Network_layer
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+// NetworkCarrierICC returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierICC(val string) attribute.KeyValue {
+ return NetworkCarrierICCKey.String(val)
+}
+
+// NetworkCarrierMCC returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMCC(val string) attribute.KeyValue {
+ return NetworkCarrierMCCKey.String(val)
+}
+
+// NetworkCarrierMNC returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMNC(val string) attribute.KeyValue {
+ return NetworkCarrierMNCKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkInterfaceName returns an attribute KeyValue conforming to the
+// "network.interface.name" semantic conventions. It represents the network
+// interface name.
+func NetworkInterfaceName(val string) attribute.KeyValue {
+ return NetworkInterfaceNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port number
+// of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
+// Enum values for network.connection.state
+var (
+ // closed
+ // Stability: development
+ NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed")
+ // close_wait
+ // Stability: development
+ NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait")
+ // closing
+ // Stability: development
+ NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing")
+ // established
+ // Stability: development
+ NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established")
+ // fin_wait_1
+ // Stability: development
+ NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1")
+ // fin_wait_2
+ // Stability: development
+ NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2")
+ // last_ack
+ // Stability: development
+ NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack")
+ // listen
+ // Stability: development
+ NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen")
+ // syn_received
+ // Stability: development
+ NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received")
+ // syn_sent
+ // Stability: development
+ NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent")
+ // time_wait
+ // Stability: development
+ NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait")
+)
+
+// Enum values for network.connection.subtype
+var (
+ // GPRS
+ // Stability: development
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ // Stability: development
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ // Stability: development
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ // Stability: development
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ // Stability: development
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ // Stability: development
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ // Stability: development
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ // Stability: development
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ // Stability: development
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ // Stability: development
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ // Stability: development
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ // Stability: development
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ // Stability: development
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ // Stability: development
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ // Stability: development
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ // Stability: development
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ // Stability: development
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ // Stability: development
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ // Stability: development
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ // Stability: development
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ // Stability: development
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+ // wifi
+ // Stability: development
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ // Stability: development
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ // Stability: development
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ // Stability: development
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ // Stability: development
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+ // transmit
+ // Stability: development
+ NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+ // receive
+ // Stability: development
+ NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+ // TCP
+ // Stability: stable
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ // Stability: stable
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe.
+ // Stability: stable
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ // Stability: stable
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+ // QUIC
+ // Stability: stable
+ NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+ // IPv4
+ // Stability: stable
+ NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ // Stability: stable
+ NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
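+
+// Illustrative usage (editor's sketch, not part of the generated conventions):
+// the helpers and enum values above are plain attribute.KeyValue values, so
+// they can be combined on a span like any other attributes (assuming a
+// trace.Span named span is in scope):
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkTypeIPv4,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(8443),
+//	)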
+
+// Namespace: nfs
+const (
+ // NfsOperationNameKey is the attribute Key conforming to the
+ // "nfs.operation.name" semantic conventions. It represents the NFSv4+ operation
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OPEN", "READ", "GETATTR"
+ NfsOperationNameKey = attribute.Key("nfs.operation.name")
+
+ // NfsServerRepcacheStatusKey is the attribute Key conforming to the
+	// "nfs.server.repcache.status" semantic conventions. It represents the Linux
+	// reply cache status: one of "hit" (NFSD_STATS_RC_HITS), "miss"
+	// (NFSD_STATS_RC_MISSES), or "nocache" (NFSD_STATS_RC_NOCACHE -- uncacheable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+	// Examples: "hit"
+ NfsServerRepcacheStatusKey = attribute.Key("nfs.server.repcache.status")
+)
+
+// NfsOperationName returns an attribute KeyValue conforming to the
+// "nfs.operation.name" semantic conventions. It represents the NFSv4+ operation
+// name.
+func NfsOperationName(val string) attribute.KeyValue {
+ return NfsOperationNameKey.String(val)
+}
+
+// NfsServerRepcacheStatus returns an attribute KeyValue conforming to the
+// "nfs.server.repcache.status" semantic conventions. It represents the linux:
+// one of "hit" (NFSD_STATS_RC_HITS), "miss" (NFSD_STATS_RC_MISSES), or "nocache"
+// (NFSD_STATS_RC_NOCACHE -- uncacheable).
+func NfsServerRepcacheStatus(val string) attribute.KeyValue {
+ return NfsServerRepcacheStatusKey.String(val)
+}
+
+// Namespace: oci
+const (
+ // OCIManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of the
+	// OCI image manifest. For container images specifically, it is the digest by
+	// which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+ // Note: Follows [OCI Image Manifest Specification], and specifically the
+ // [Digest property].
+ // An example can be found in [Example Image Manifest].
+ //
+ // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+ // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+ // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+ OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, it is the digest by
+// which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+ return OCIManifestDigestKey.String(val)
+}
+
+// Namespace: onc_rpc
+const (
+ // OncRPCProcedureNameKey is the attribute Key conforming to the
+ // "onc_rpc.procedure.name" semantic conventions. It represents the ONC/Sun RPC
+ // procedure name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OPEN", "READ", "GETATTR"
+ OncRPCProcedureNameKey = attribute.Key("onc_rpc.procedure.name")
+
+ // OncRPCProcedureNumberKey is the attribute Key conforming to the
+ // "onc_rpc.procedure.number" semantic conventions. It represents the ONC/Sun
+ // RPC procedure number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OncRPCProcedureNumberKey = attribute.Key("onc_rpc.procedure.number")
+
+ // OncRPCProgramNameKey is the attribute Key conforming to the
+ // "onc_rpc.program.name" semantic conventions. It represents the ONC/Sun RPC
+ // program name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "portmapper", "nfs"
+ OncRPCProgramNameKey = attribute.Key("onc_rpc.program.name")
+
+ // OncRPCVersionKey is the attribute Key conforming to the "onc_rpc.version"
+ // semantic conventions. It represents the ONC/Sun RPC program version.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OncRPCVersionKey = attribute.Key("onc_rpc.version")
+)
+
+// OncRPCProcedureName returns an attribute KeyValue conforming to the
+// "onc_rpc.procedure.name" semantic conventions. It represents the ONC/Sun RPC
+// procedure name.
+func OncRPCProcedureName(val string) attribute.KeyValue {
+ return OncRPCProcedureNameKey.String(val)
+}
+
+// OncRPCProcedureNumber returns an attribute KeyValue conforming to the
+// "onc_rpc.procedure.number" semantic conventions. It represents the ONC/Sun RPC
+// procedure number.
+func OncRPCProcedureNumber(val int) attribute.KeyValue {
+ return OncRPCProcedureNumberKey.Int(val)
+}
+
+// OncRPCProgramName returns an attribute KeyValue conforming to the
+// "onc_rpc.program.name" semantic conventions. It represents the ONC/Sun RPC
+// program name.
+func OncRPCProgramName(val string) attribute.KeyValue {
+ return OncRPCProgramNameKey.String(val)
+}
+
+// OncRPCVersion returns an attribute KeyValue conforming to the
+// "onc_rpc.version" semantic conventions. It represents the ONC/Sun RPC program
+// version.
+func OncRPCVersion(val int) attribute.KeyValue {
+ return OncRPCVersionKey.Int(val)
+}
+
+// Namespace: openai
+const (
+ // OpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "openai.request.service_tier" semantic conventions. It represents the service
+ // tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier")
+
+ // OpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier")
+
+ // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the
+ // "openai.response.system_fingerprint" semantic conventions. It represents a
+ // fingerprint to track any eventual change in the Generative AI environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint")
+)
+
+// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the
+// "openai.response.service_tier" semantic conventions. It represents the service
+// tier used for the response.
+func OpenAIResponseServiceTier(val string) attribute.KeyValue {
+ return OpenAIResponseServiceTierKey.String(val)
+}
+
+// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to
+// the "openai.response.system_fingerprint" semantic conventions. It represents a
+// fingerprint to track any eventual change in the Generative AI environment.
+func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue {
+ return OpenAIResponseSystemFingerprintKey.String(val)
+}
+
+// Enum values for openai.request.service_tier
+var (
+ // The system will utilize scale tier credits until they are exhausted.
+ // Stability: development
+ OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto")
+ // The system will utilize the default scale tier.
+ // Stability: development
+ OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default")
+)
+
+// Namespace: openshift
+const (
+ // OpenShiftClusterquotaNameKey is the attribute Key conforming to the
+ // "openshift.clusterquota.name" semantic conventions. It represents the name of
+ // the cluster quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ OpenShiftClusterquotaNameKey = attribute.Key("openshift.clusterquota.name")
+
+ // OpenShiftClusterquotaUIDKey is the attribute Key conforming to the
+ // "openshift.clusterquota.uid" semantic conventions. It represents the UID of
+ // the cluster quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ OpenShiftClusterquotaUIDKey = attribute.Key("openshift.clusterquota.uid")
+)
+
+// OpenShiftClusterquotaName returns an attribute KeyValue conforming to the
+// "openshift.clusterquota.name" semantic conventions. It represents the name of
+// the cluster quota.
+func OpenShiftClusterquotaName(val string) attribute.KeyValue {
+ return OpenShiftClusterquotaNameKey.String(val)
+}
+
+// OpenShiftClusterquotaUID returns an attribute KeyValue conforming to the
+// "openshift.clusterquota.uid" semantic conventions. It represents the UID of
+// the cluster quota.
+func OpenShiftClusterquotaUID(val string) attribute.KeyValue {
+ return OpenShiftClusterquotaUIDKey.String(val)
+}
+
+// Namespace: opentracing
+const (
+ // OpenTracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the parent-child
+ // Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+ // The parent Span depends on the child Span in some capacity
+ // Stability: development
+ OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ // Stability: development
+ OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
+
+// Namespace: os
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to be
+	// parsed) OS version information, as reported by, for example, the `ver` or
+	// `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version" semantic
+ // conventions. It represents the version string of the operating system as
+ // defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, as reported by, for example, the `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Enum values for os.type
+var (
+ // Microsoft Windows
+ // Stability: development
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ // Stability: development
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ // Stability: development
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ // Stability: development
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ // Stability: development
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ // Stability: development
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ // Stability: development
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ // Stability: development
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ // Stability: development
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ // Stability: development
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ // Stability: development
+ OSTypeZOS = OSTypeKey.String("zos")
+)
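+
+// Illustrative usage (editor's sketch, not part of the generated conventions):
+// OS attributes are typically attached to a resource; as plain
+// attribute.KeyValue values they can be assembled like this, using the example
+// values from the documentation above:
+//
+//	osAttrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//		OSDescription("Ubuntu 18.04.1 LTS"),
+//	}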
+
+// Namespace: otel
+const (
+ // OTelComponentNameKey is the attribute Key conforming to the
+ // "otel.component.name" semantic conventions. It represents a name uniquely
+ // identifying the instance of the OpenTelemetry component within its containing
+ // SDK instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otlp_grpc_span_exporter/0", "custom-name"
+ // Note: Implementations SHOULD ensure a low cardinality for this attribute,
+ // even across application or SDK restarts.
+ // E.g. implementations MUST NOT use UUIDs as values for this attribute.
+ //
+	// Implementations MAY achieve these goals by following a
+	// `<otel.component.type>/<instance-counter>` pattern, e.g.
+	// `batching_span_processor/0`.
+ // Hereby `otel.component.type` refers to the corresponding attribute value of
+ // the component.
+ //
+ // The value of `instance-counter` MAY be automatically assigned by the
+ // component and uniqueness within the enclosing SDK instance MUST be
+ // guaranteed.
+	// For example, `<instance-counter>` MAY be implemented by using a monotonically
+ // increasing counter (starting with `0`), which is incremented every time an
+ // instance of the given component type is started.
+ //
+ // With this implementation, for example the first Batching Span Processor would
+ // have `batching_span_processor/0`
+ // as `otel.component.name`, the second one `batching_span_processor/1` and so
+ // on.
+ // These values will therefore be reused in the case of an application restart.
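+	//
+	// For illustration only (editor's sketch, not part of the generated
+	// conventions), such a name could be derived from the component type and a
+	// per-type counter; `componentType` and `instanceCounter` are hypothetical
+	// locals here:
+	//
+	//	name := fmt.Sprintf("%s/%d", componentType, instanceCounter)
+	//	// -> "batching_span_processor/0", then "batching_span_processor/1", ...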
+ OTelComponentNameKey = attribute.Key("otel.component.name")
+
+ // OTelComponentTypeKey is the attribute Key conforming to the
+ // "otel.component.type" semantic conventions. It represents a name identifying
+ // the type of the OpenTelemetry component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "batching_span_processor", "com.example.MySpanExporter"
+ // Note: If none of the standardized values apply, implementations SHOULD use
+ // the language-defined name of the type.
+ // E.g. for Java the fully qualified classname SHOULD be used in this case.
+ OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+ // OTelEventNameKey is the attribute Key conforming to the "otel.event.name"
+	// semantic conventions. It identifies the class / type of event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "browser.mouse.click", "device.app.lifecycle"
+ // Note: This attribute SHOULD be used by non-OTLP exporters when destination
+ // does not support `EventName` or equivalent field. This attribute MAY be used
+ // by applications using existing logging libraries so that it can be used to
+ // set the `EventName` field by Collector or SDK components.
+ OTelEventNameKey = attribute.Key("otel.event.name")
+
+ // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+	// semantic conventions. It represents the name of the instrumentation scope
+	// (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "io.opentelemetry.contrib.mongodb"
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeSchemaURLKey is the attribute Key conforming to the
+ // "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+ // the instrumentation scope.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://opentelemetry.io/schemas/1.31.0"
+ OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of the
+	// instrumentation scope (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.0.0"
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+ // OTelSpanParentOriginKey is the attribute Key conforming to the
+	// "otel.span.parent.origin" semantic conventions. It determines whether the
+	// span has a parent span, and if so,
+ // [whether it is a remote parent].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin")
+
+ // OTelSpanSamplingResultKey is the attribute Key conforming to the
+ // "otel.span.sampling_result" semantic conventions. It represents the result
+ // value of the sampler for this span.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+ // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+ // semantic conventions. It represents the name of the code, either "OK" or
+ // "ERROR". MUST NOT be set if the status code is UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the description
+ // of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "resource not found"
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
+func OTelComponentName(val string) attribute.KeyValue {
+ return OTelComponentNameKey.String(val)
+}
+
+// OTelEventName returns an attribute KeyValue conforming to the
+// "otel.event.name" semantic conventions. It represents the identifies the class
+// / type of event.
+func OTelEventName(val string) attribute.KeyValue {
+ return OTelEventNameKey.String(val)
+}
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeSchemaURL returns an attribute KeyValue conforming to the
+// "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+// the instrumentation scope.
+func OTelScopeSchemaURL(val string) attribute.KeyValue {
+ return OTelScopeSchemaURLKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the description
+// of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// Enum values for otel.component.type
+var (
+ // The builtin SDK batching span processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor")
+ // The builtin SDK simple span processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor")
+ // The builtin SDK batching log record processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor")
+ // The builtin SDK simple log record processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor")
+ // OTLP span exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter")
+ // OTLP span exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter")
+ // OTLP span exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter")
+ // Zipkin span exporter over HTTP
+ //
+ // Stability: development
+ OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter")
+ // OTLP log record exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter")
+ // OTLP log record exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter")
+ // OTLP log record exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter")
+ // The builtin SDK periodically exporting metric reader
+ //
+ // Stability: development
+ OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader")
+ // OTLP metric exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter")
+ // OTLP metric exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+ // Prometheus metric exporter over HTTP with the default text-based format
+ //
+ // Stability: development
+ OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter")
+)
+
+// Enum values for otel.span.parent.origin
+var (
+ // The span does not have a parent, it is a root span
+ // Stability: development
+ OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none")
+ // The span has a parent and the parent's span context [isRemote()] is false
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local")
+ // The span has a parent and the parent's span context [isRemote()] is true
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
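+
+// Illustrative usage (editor's sketch, not part of the generated conventions):
+// per the notes above, the status attributes are only set when a status of OK
+// or ERROR has been assigned, never for UNSET; assuming a trace.Span named
+// span and an error value err:
+//
+//	if err != nil {
+//		span.SetAttributes(
+//			OTelStatusCodeError,
+//			OTelStatusDescription(err.Error()),
+//		)
+//	}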
+
+// Namespace: pprof
+const (
+ // PprofLocationIsFoldedKey is the attribute Key conforming to the
+	// "pprof.location.is_folded" semantic conventions. It indicates that multiple
+	// symbols map to this location's address, for
+ // example due to identical code folding by the linker. In that case the line
+ // information represents one of the multiple symbols. This field must be
+ // recomputed when the symbolization state of the profile changes.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ PprofLocationIsFoldedKey = attribute.Key("pprof.location.is_folded")
+
+ // PprofMappingHasFilenamesKey is the attribute Key conforming to the
+	// "pprof.mapping.has_filenames" semantic conventions. It indicates that there
+	// are filenames related to this mapping.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ PprofMappingHasFilenamesKey = attribute.Key("pprof.mapping.has_filenames")
+
+ // PprofMappingHasFunctionsKey is the attribute Key conforming to the
+	// "pprof.mapping.has_functions" semantic conventions. It indicates that there
+	// are functions related to this mapping.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ PprofMappingHasFunctionsKey = attribute.Key("pprof.mapping.has_functions")
+
+ // PprofMappingHasInlineFramesKey is the attribute Key conforming to the
+	// "pprof.mapping.has_inline_frames" semantic conventions. It indicates that
+	// there are inline frames related to this mapping.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ PprofMappingHasInlineFramesKey = attribute.Key("pprof.mapping.has_inline_frames")
+
+ // PprofMappingHasLineNumbersKey is the attribute Key conforming to the
+	// "pprof.mapping.has_line_numbers" semantic conventions. It indicates that
+	// there are line numbers related to this mapping.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ PprofMappingHasLineNumbersKey = attribute.Key("pprof.mapping.has_line_numbers")
+
+ // PprofProfileCommentKey is the attribute Key conforming to the
+ // "pprof.profile.comment" semantic conventions. It represents the free-form
+ // text associated with the profile. This field should not be used to store any
+	// machine-readable information; it is only for human-friendly content.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "hello world", "bazinga"
+ PprofProfileCommentKey = attribute.Key("pprof.profile.comment")
+
+ // PprofProfileDocURLKey is the attribute Key conforming to the
+ // "pprof.profile.doc_url" semantic conventions. It represents the documentation
+ // link for this profile type.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "http://pprof.example.com/cpu-profile.html"
+ // Note: The URL must be absolute and may be missing if the profile was
+	// generated by code that did not supply a link.
+ PprofProfileDocURLKey = attribute.Key("pprof.profile.doc_url")
+
+ // PprofProfileDropFramesKey is the attribute Key conforming to the
+	// "pprof.profile.drop_frames" semantic conventions. It represents a regexp;
+	// frames whose Function.function_name fully matches it will be dropped from
+	// the samples, along with their successors.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/foobar/"
+ PprofProfileDropFramesKey = attribute.Key("pprof.profile.drop_frames")
+
+ // PprofProfileKeepFramesKey is the attribute Key conforming to the
+	// "pprof.profile.keep_frames" semantic conventions. It represents a regexp;
+	// frames whose Function.function_name fully matches it will be kept, even if
+	// they match drop_frames.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/bazinga/"
+ PprofProfileKeepFramesKey = attribute.Key("pprof.profile.keep_frames")
+)
+
+// PprofLocationIsFolded returns an attribute KeyValue conforming to the
+// "pprof.location.is_folded" semantic conventions. It represents the provides an
+// indication that multiple symbols map to this location's address, for example
+// due to identical code folding by the linker. In that case the line information
+// represents one of the multiple symbols. This field must be recomputed when the
+// symbolization state of the profile changes.
+func PprofLocationIsFolded(val bool) attribute.KeyValue {
+ return PprofLocationIsFoldedKey.Bool(val)
+}
+
+// PprofMappingHasFilenames returns an attribute KeyValue conforming to the
+// "pprof.mapping.has_filenames" semantic conventions. It represents the
+// indicates that there are filenames related to this mapping.
+func PprofMappingHasFilenames(val bool) attribute.KeyValue {
+ return PprofMappingHasFilenamesKey.Bool(val)
+}
+
+// PprofMappingHasFunctions returns an attribute KeyValue conforming to the
+// "pprof.mapping.has_functions" semantic conventions. It represents the
+// indicates that there are functions related to this mapping.
+func PprofMappingHasFunctions(val bool) attribute.KeyValue {
+ return PprofMappingHasFunctionsKey.Bool(val)
+}
+
+// PprofMappingHasInlineFrames returns an attribute KeyValue conforming to the
+// "pprof.mapping.has_inline_frames" semantic conventions. It represents the
+// indicates that there are inline frames related to this mapping.
+func PprofMappingHasInlineFrames(val bool) attribute.KeyValue {
+ return PprofMappingHasInlineFramesKey.Bool(val)
+}
+
+// PprofMappingHasLineNumbers returns an attribute KeyValue conforming to the
+// "pprof.mapping.has_line_numbers" semantic conventions. It represents the
+// indicates that there are line numbers related to this mapping.
+func PprofMappingHasLineNumbers(val bool) attribute.KeyValue {
+ return PprofMappingHasLineNumbersKey.Bool(val)
+}
+
+// PprofProfileComment returns an attribute KeyValue conforming to the
+// "pprof.profile.comment" semantic conventions. It represents the free-form text
+// associated with the profile. This field should not be used to store any
+// machine-readable information; it is only for human-friendly content.
+func PprofProfileComment(val ...string) attribute.KeyValue {
+ return PprofProfileCommentKey.StringSlice(val)
+}
+
+// PprofProfileDocURL returns an attribute KeyValue conforming to the
+// "pprof.profile.doc_url" semantic conventions. It represents the documentation
+// link for this profile type.
+func PprofProfileDocURL(val string) attribute.KeyValue {
+ return PprofProfileDocURLKey.String(val)
+}
+
+// PprofProfileDropFrames returns an attribute KeyValue conforming to the
+// "pprof.profile.drop_frames" semantic conventions. It represents the frames
+// with Function.function_name fully matching the regexp will be dropped from the
+// samples, along with their successors.
+func PprofProfileDropFrames(val string) attribute.KeyValue {
+ return PprofProfileDropFramesKey.String(val)
+}
+
+// PprofProfileKeepFrames returns an attribute KeyValue conforming to the
+// "pprof.profile.keep_frames" semantic conventions. It represents the frames
+// with Function.function_name fully matching the regexp will be kept, even if it
+// matches drop_frames.
+func PprofProfileKeepFrames(val string) attribute.KeyValue {
+ return PprofProfileKeepFramesKey.String(val)
+}
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux based systems, can be set to the zeroth
+ // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received by
+ // the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otecol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+	// used to launch the process, as a single string.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otecol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch.type" semantic conventions. It specifies whether the
+	// context switches for this data point were voluntary or
+ // involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch.type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+	// build ID as retrieved by `go tool buildid <go executable>`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling specific build ID for executables. See the OTel specification for
+ // Profiles for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux based systems, can be set to the target
+ // of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ //
+ // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html
+ ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent Process
+ // identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic
+ // conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user ID
+ // (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the username of
+ // the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+	// Examples: "Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0"
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of the
+ // runtime of this process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OpenJDK Runtime Environment"
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the version of
+ // the runtime of this process, as returned by the runtime without modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+	// Examples: "14.0.2"
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved user ID
+ // (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the username of
+ // the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID of
+ // the process's session leader. This is also the session ID (SID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessStateKey is the attribute Key conforming to the "process.state"
+ // semantic conventions. It represents the process state, e.g.,
+ // [Linux Process State Codes].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "running"
+ //
+ // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+ ProcessStateKey = attribute.Key("process.state")
+
+ // ProcessTitleKey is the attribute Key conforming to the "process.title"
+ // semantic conventions. It represents the process title (proctitle).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cat /etc/hostname", "xfce4-session", "bash"
+	// Note: In many Unix-like systems, the process title (proctitle) is the string
+ // that represents the name or command line of a running process, displayed by
+ // system monitoring tools like ps, top, and htop.
+ ProcessTitleKey = attribute.Key("process.title")
+
+ // ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+ // semantic conventions. It represents the effective user ID (EUID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+ // semantic conventions. It represents the username of the effective user of the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+ // conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host but it is unique within the process
+ // namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+
+ // ProcessWorkingDirectoryKey is the attribute Key conforming to the
+ // "process.working_directory" semantic conventions. It represents the working
+ // directory of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
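+
+// Illustrative usage (editor's sketch, not part of the generated conventions):
+// when command arguments are collected, process.args_count mirrors the length
+// of the process.command_args array, e.g.:
+//
+//	args := []string{"cmd/otecol", "--config=config.yaml"}
+//	attrs := []attribute.KeyValue{
+//		ProcessCommandArgs(args...),
+//		ProcessArgsCount(len(args)),
+//	}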
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process, as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the
+// "process.environment_variable" semantic conventions. It represents the process
+// environment variables, `<key>` being the environment variable name, the value
+// being the environment variable value.
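+//
+// For example (editor's note, not part of the generated conventions):
+//
+//	ProcessEnvironmentVariable("PATH", "/usr/bin")
+//	// -> attribute "process.environment_variable.PATH" with value "/usr/bin"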
+func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue {
+ return attribute.String("process.environment_variable."+key, val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user ID
+// (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username of
+// the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessTitle returns an attribute KeyValue conforming to the "process.title"
+// semantic conventions. It represents the process title (proctitle).
+func ProcessTitle(val string) attribute.KeyValue {
+ return ProcessTitleKey.String(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid"
+// semantic conventions. It represents the virtual process identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
+
+// ProcessWorkingDirectory returns an attribute KeyValue conforming to the
+// "process.working_directory" semantic conventions. It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
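+// A hypothetical sketch combining several of the helpers above into a set of
+// process attributes sourced from the standard library (imports and error
+// handling omitted; assumes the package is imported as semconv):
+//
+//	exe, _ := os.Executable()
+//	wd, _ := os.Getwd()
+//	attrs := []attribute.KeyValue{
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessParentPID(os.Getppid()),
+//		semconv.ProcessExecutablePath(exe),
+//		semconv.ProcessExecutableName(filepath.Base(exe)),
+//		semconv.ProcessWorkingDirectory(wd),
+//	}
+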
+// Enum values for process.context_switch.type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.state
+var (
+ // running
+ // Stability: development
+ ProcessStateRunning = ProcessStateKey.String("running")
+ // sleeping
+ // Stability: development
+ ProcessStateSleeping = ProcessStateKey.String("sleeping")
+ // stopped
+ // Stability: development
+ ProcessStateStopped = ProcessStateKey.String("stopped")
+ // defunct
+ // Stability: development
+ ProcessStateDefunct = ProcessStateKey.String("defunct")
+)
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It represents the describes the
+ // interpreter or compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.
+ //
+ // Stability: development
+ //
+ // [C]: https://wikipedia.org/wiki/C_(programming_language)
+ // [C++]: https://wikipedia.org/wiki/C%2B%2B
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+ // [Perl]
+ //
+ // Stability: development
+ //
+ // [Perl]: https://wikipedia.org/wiki/Perl
+ ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+ // [PHP]
+ //
+ // Stability: development
+ //
+ // [PHP]: https://wikipedia.org/wiki/PHP
+ ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+ // [Python]
+ //
+ // Stability: development
+ //
+ // [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+ ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+ // [Ruby]
+ //
+ // Stability: development
+ //
+ // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+ ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+ // [V8JS]
+ //
+ // Stability: development
+ //
+ // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+ ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+ // [Erlang]
+ //
+ // Stability: development
+ //
+ // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+ ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+ // [Go]
+ //
+ // Stability: development
+ //
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+ // [Rust]
+ //
+ // Stability: development
+ //
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the fully-qualified logical name of the method
+ // from the RPC interface perspective.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.example.ExampleService/exampleMethod", "EchoService/Echo",
+ // "_OTHER"
+ // Note: The method name MAY have unbounded cardinality in edge or error cases.
+ //
+ // Some RPC frameworks or libraries provide a fixed set of recognized methods
+ // for client stubs and server implementations. Instrumentations for such
+ // frameworks MUST set this attribute to the original method name only
+ // when the method is recognized by the framework or library.
+ //
+ // When the method is not recognized, for example, when the server receives
+ // a request for a method that is not predefined on the server, or when
+ // instrumentation is not able to reliably detect if the method is predefined,
+ // the attribute MUST be set to `_OTHER`. In such cases, tracing
+ // instrumentations MUST also set `rpc.method_original` attribute to
+ // the original method value.
+ //
+ // If the RPC instrumentation could end up converting valid RPC methods to
+ // `_OTHER`, then it SHOULD provide a way to configure the list of recognized
+ // RPC methods.
+ //
+ // The `rpc.method` can be different from the name of any implementing
+ // method/function.
+ // The `code.function.name` attribute may be used to record the fully-qualified
+ // method actually executing the call on the server side, or the
+ // RPC client stub method on the client side.
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCMethodOriginalKey is the attribute Key conforming to the
+ // "rpc.method_original" semantic conventions. It represents the original name
+ // of the method used by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.myservice.EchoService/catchAll",
+ // "com.myservice.EchoService/unknownMethod", "InvalidMethod"
+ RPCMethodOriginalKey = attribute.Key("rpc.method_original")
+
+ // RPCResponseStatusCodeKey is the attribute Key conforming to the
+ // "rpc.response.status_code" semantic conventions. It represents the status
+ // code of the RPC returned by the RPC server or generated by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OK", "DEADLINE_EXCEEDED", "-32602"
+ // Note: Usually it represents an error code, but may also represent partial
+ // success, warning, or differentiate between various types of successful
+ // outcomes.
+ // Semantic conventions for individual RPC frameworks SHOULD document what
+ // `rpc.response.status_code` means in the context of that system and which
+ // values are considered to represent errors.
+ RPCResponseStatusCodeKey = attribute.Key("rpc.response.status_code")
+
+ // RPCSystemNameKey is the attribute Key conforming to the "rpc.system.name"
+ // semantic conventions. It represents the Remote Procedure Call (RPC) system.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The client and server RPC systems may differ for the same RPC
+ // interaction. For example, a client may use Apache Dubbo or Connect RPC to
+ // communicate with a server that uses gRPC since both protocols provide
+ // compatibility with gRPC.
+ RPCSystemNameKey = attribute.Key("rpc.system.name")
+)
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`, one for sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the fully-qualified logical name of the
+// method from the RPC interface perspective.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCMethodOriginal returns an attribute KeyValue conforming to the
+// "rpc.method_original" semantic conventions. It represents the original name of
+// the method used by the client.
+func RPCMethodOriginal(val string) attribute.KeyValue {
+ return RPCMethodOriginalKey.String(val)
+}
+
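+// A hypothetical sketch of the `_OTHER` fallback described in the rpc.method
+// note above (not part of the generated API; method and isKnownMethod are
+// assumed to come from the instrumentation; package imported as semconv):
+//
+//	attrs := []attribute.KeyValue{semconv.RPCMethod(method)}
+//	if !isKnownMethod(method) {
+//		// The method is not predefined, so record the low-cardinality
+//		// placeholder and preserve the original name separately.
+//		attrs = []attribute.KeyValue{
+//			semconv.RPCMethod("_OTHER"),
+//			semconv.RPCMethodOriginal(method),
+//		}
+//	}
+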
+// RPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.request.metadata" semantic conventions. It represents the RPC request
+// metadata, the `key` being the normalized RPC metadata key (lowercase), the
+// value being the metadata values.
+func RPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.request.metadata."+key, val)
+}
+
+// RPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.response.metadata" semantic conventions. It represents the RPC response
+// metadata, the `key` being the normalized RPC metadata key (lowercase), the
+// value being the metadata values.
+func RPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.response.metadata."+key, val)
+}
+
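+// A hypothetical sketch of the lowercase key normalization both metadata
+// helpers expect (not part of the generated API; reqID is an assumed variable;
+// package imported as semconv):
+//
+//	// Produces the key "rpc.request.metadata.x-request-id".
+//	attr := semconv.RPCRequestMetadata(strings.ToLower("X-Request-Id"), reqID)
+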
+// RPCResponseStatusCode returns an attribute KeyValue conforming to the
+// "rpc.response.status_code" semantic conventions. It represents the status code
+// of the RPC returned by the RPC server or generated by the client.
+func RPCResponseStatusCode(val string) attribute.KeyValue {
+ return RPCResponseStatusCodeKey.String(val)
+}
+
+// Enum values for rpc.message.type
+var (
+ // sent
+ // Stability: development
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ // Stability: development
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+// Enum values for rpc.system.name
+var (
+ // [gRPC]
+ // Stability: development
+ //
+ // [gRPC]: https://grpc.io/
+ RPCSystemNameGRPC = RPCSystemNameKey.String("grpc")
+ // [Apache Dubbo]
+ // Stability: development
+ //
+ // [Apache Dubbo]: https://dubbo.apache.org/
+ RPCSystemNameDubbo = RPCSystemNameKey.String("dubbo")
+ // [Connect RPC]
+ // Stability: development
+ //
+ // [Connect RPC]: https://connectrpc.com/
+ RPCSystemNameConnectrpc = RPCSystemNameKey.String("connectrpc")
+ // [JSON-RPC]
+ // Stability: development
+ //
+ // [JSON-RPC]: https://www.jsonrpc.org/
+ RPCSystemNameJSONRPC = RPCSystemNameKey.String("jsonrpc")
+)
+
+// Namespace: security_rule
+const (
+ // SecurityRuleCategoryKey is the attribute Key conforming to the
+ // "security_rule.category" semantic conventions. It represents a categorization
+ // value keyword used by the entity using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Attempted Information Leak"
+ SecurityRuleCategoryKey = attribute.Key("security_rule.category")
+
+ // SecurityRuleDescriptionKey is the attribute Key conforming to the
+ // "security_rule.description" semantic conventions. It represents the
+ // description of the rule generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Block requests to public DNS over HTTPS / TLS protocols"
+ SecurityRuleDescriptionKey = attribute.Key("security_rule.description")
+
+ // SecurityRuleLicenseKey is the attribute Key conforming to the
+ // "security_rule.license" semantic conventions. It represents the name of the
+ // license under which the rule used to generate this event is made available.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apache 2.0"
+ SecurityRuleLicenseKey = attribute.Key("security_rule.license")
+
+ // SecurityRuleNameKey is the attribute Key conforming to the
+ // "security_rule.name" semantic conventions. It represents the name of the rule
+ // or signature generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BLOCK_DNS_over_TLS"
+ SecurityRuleNameKey = attribute.Key("security_rule.name")
+
+ // SecurityRuleReferenceKey is the attribute Key conforming to the
+ // "security_rule.reference" semantic conventions. It represents the reference
+ // URL to additional information about the rule used to generate this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS"
+ // Note: The URL can point to the vendor’s documentation about the rule. If
+ // that’s not available, it can also be a link to a more general page
+ // describing this type of alert.
+ SecurityRuleReferenceKey = attribute.Key("security_rule.reference")
+
+ // SecurityRuleRulesetNameKey is the attribute Key conforming to the
+ // "security_rule.ruleset.name" semantic conventions. It represents the name of
+ // the ruleset, policy, group, or parent category in which the rule used to
+ // generate this event is a member.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Standard_Protocol_Filters"
+ SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name")
+
+ // SecurityRuleUUIDKey is the attribute Key conforming to the
+ // "security_rule.uuid" semantic conventions. It represents a rule ID that is
+ // unique within the scope of a set or group of agents, observers, or other
+ // entities using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011"
+ SecurityRuleUUIDKey = attribute.Key("security_rule.uuid")
+
+ // SecurityRuleVersionKey is the attribute Key conforming to the
+ // "security_rule.version" semantic conventions. It represents the version /
+ // revision of the rule being used for analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.0.0"
+ SecurityRuleVersionKey = attribute.Key("security_rule.version")
+)
+
+// SecurityRuleCategory returns an attribute KeyValue conforming to the
+// "security_rule.category" semantic conventions. It represents a categorization
+// value keyword used by the entity using the rule for detection of this event.
+func SecurityRuleCategory(val string) attribute.KeyValue {
+ return SecurityRuleCategoryKey.String(val)
+}
+
+// SecurityRuleDescription returns an attribute KeyValue conforming to the
+// "security_rule.description" semantic conventions. It represents the
+// description of the rule generating the event.
+func SecurityRuleDescription(val string) attribute.KeyValue {
+ return SecurityRuleDescriptionKey.String(val)
+}
+
+// SecurityRuleLicense returns an attribute KeyValue conforming to the
+// "security_rule.license" semantic conventions. It represents the name of the
+// license under which the rule used to generate this event is made available.
+func SecurityRuleLicense(val string) attribute.KeyValue {
+ return SecurityRuleLicenseKey.String(val)
+}
+
+// SecurityRuleName returns an attribute KeyValue conforming to the
+// "security_rule.name" semantic conventions. It represents the name of the rule
+// or signature generating the event.
+func SecurityRuleName(val string) attribute.KeyValue {
+ return SecurityRuleNameKey.String(val)
+}
+
+// SecurityRuleReference returns an attribute KeyValue conforming to the
+// "security_rule.reference" semantic conventions. It represents the reference
+// URL to additional information about the rule used to generate this event.
+func SecurityRuleReference(val string) attribute.KeyValue {
+ return SecurityRuleReferenceKey.String(val)
+}
+
+// SecurityRuleRulesetName returns an attribute KeyValue conforming to the
+// "security_rule.ruleset.name" semantic conventions. It represents the name of
+// the ruleset, policy, group, or parent category in which the rule used to
+// generate this event is a member.
+func SecurityRuleRulesetName(val string) attribute.KeyValue {
+ return SecurityRuleRulesetNameKey.String(val)
+}
+
+// SecurityRuleUUID returns an attribute KeyValue conforming to the
+// "security_rule.uuid" semantic conventions. It represents a rule ID that is
+// unique within the scope of a set or group of agents, observers, or other
+// entities using the rule for detection of this event.
+func SecurityRuleUUID(val string) attribute.KeyValue {
+ return SecurityRuleUUIDKey.String(val)
+}
+
+// SecurityRuleVersion returns an attribute KeyValue conforming to the
+// "security_rule.version" semantic conventions. It represents the version /
+// revision of the rule being used for analysis.
+func SecurityRuleVersion(val string) attribute.KeyValue {
+ return SecurityRuleVersionKey.String(val)
+}
+
+// Namespace: server
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.address` SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port" semantic
+ // conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.port` SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the "server.address"
+// semantic conventions. It represents the server domain name if available
+// without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
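+// A hypothetical sketch of filling both server attributes from a "host:port"
+// endpoint string (not part of the generated API; imports omitted; package
+// imported as semconv):
+//
+//	host, portStr, err := net.SplitHostPort("example.com:8080")
+//	if err == nil {
+//		port, _ := strconv.Atoi(portStr)
+//		attrs := []attribute.KeyValue{
+//			semconv.ServerAddress(host),
+//			semconv.ServerPort(port),
+//		}
+//		_ = attrs
+//	}
+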
+// Namespace: service
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID of
+ // the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "627cc493-f310-47de-96bd-71410b7dec09"
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time (e.g.
+ // instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 [RFC
+ // 4122] UUID, but are free to use an inherent unique ID as
+ // the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be used as
+ // source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the purposes of
+ // identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`] file, the underlying
+ // data, such as pod name and namespace should be treated as confidential, being
+ // the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we do
+ // not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+ // recommended that each division (e.g. a worker thread in unicorn) have its
+ // own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it can't
+ // unambiguously determine the
+ // service instance that is generating that telemetry. For instance, creating a
+ // UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container within
+ // that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers, as
+ // they know the target address and
+ // port.
+ //
+ // [RFC
+ // 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fallback to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Shop"
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace
+ // defined (so the empty/unspecified namespace is simply one more valid
+ // namespace). Zero-length namespace string is assumed equal to unspecified
+ // namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServicePeerNameKey is the attribute Key conforming to the "service.peer.name"
+ // semantic conventions. It represents the logical name of the service on the
+ // other side of the connection. SHOULD be equal to the actual [`service.name`]
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "shoppingcart"
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ ServicePeerNameKey = attribute.Key("service.peer.name")
+
+ // ServicePeerNamespaceKey is the attribute Key conforming to the
+ // "service.peer.namespace" semantic conventions. It represents the logical
+ // namespace of the service on the other side of the connection. SHOULD be equal
+ // to the actual [`service.namespace`] resource attribute of the remote service
+ // if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Shop"
+ //
+ // [`service.namespace`]: /docs/resource/README.md#service
+ ServicePeerNamespaceKey = attribute.Key("service.peer.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the "service.version"
+ // semantic conventions. It represents the version string of the service
+ // component. The format is not defined by these conventions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "2.0.0", "a01dbef8a"
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of the
+// service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
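+// A hypothetical sketch of the stable-ID derivation described in the
+// service.instance.id note above: a Version 5 UUID in the recommended
+// namespace, seeded from an inherent unique ID (machineID is an assumed
+// input). Uses github.com/google/uuid; not part of the generated API.
+//
+//	ns := uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")
+//	// NewSHA1 derives a Version 5 UUID from the namespace and the name.
+//	id := uuid.NewSHA1(ns, []byte(machineID))
+//	attr := semconv.ServiceInstanceID(id.String())
+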
+// ServiceName returns an attribute KeyValue conforming to the "service.name"
+// semantic conventions. It represents the logical name of the service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
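+// A hypothetical sketch of the `unknown_service:` fallback described in the
+// service.name note above (not part of the generated API; cfg is an assumed
+// configuration value; package imported as semconv):
+//
+//	name := cfg.ServiceName
+//	if name == "" {
+//		if exe, err := os.Executable(); err == nil {
+//			name = "unknown_service:" + filepath.Base(exe)
+//		} else {
+//			name = "unknown_service"
+//		}
+//	}
+//	attr := semconv.ServiceName(name)
+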
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServicePeerName returns an attribute KeyValue conforming to the
+// "service.peer.name" semantic conventions. It represents the logical name of
+// the service on the other side of the connection. SHOULD be equal to the actual
+// [`service.name`] resource attribute of the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func ServicePeerName(val string) attribute.KeyValue {
+ return ServicePeerNameKey.String(val)
+}
+
+// ServicePeerNamespace returns an attribute KeyValue conforming to the
+// "service.peer.namespace" semantic conventions. It represents the logical
+// namespace of the service on the other side of the connection. SHOULD be equal
+// to the actual [`service.namespace`] resource attribute of the remote service
+// if any.
+//
+// [`service.namespace`]: /docs/resource/README.md#service
+func ServicePeerNamespace(val string) attribute.KeyValue {
+ return ServicePeerNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service component. The format is not defined by these conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
+// Namespace: session
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id" semantic
+ // conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the signalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "(identifier)"
+ SystemDeviceKey = attribute.Key("system.device")
+
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the filesystem
+ // mode.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "rw, ro"
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/mnt/data"
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the filesystem
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "used"
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the filesystem
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ext4"
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+ // SystemMemoryLinuxSlabStateKey is the attribute Key conforming to the
+ // "system.memory.linux.slab.state" semantic conventions. It represents the
+ // Linux Slab memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "reclaimable", "unreclaimable"
+ SystemMemoryLinuxSlabStateKey = attribute.Key("system.memory.linux.slab.state")
+
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free", "cached"
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "in"
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingFaultTypeKey is the attribute Key conforming to the
+ // "system.paging.fault.type" semantic conventions. It represents the paging
+ // fault type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "minor"
+ SystemPagingFaultTypeKey = attribute.Key("system.paging.fault.type")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory paging
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free"
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.linux.slab.state
+var (
+ // reclaimable
+ // Stability: development
+ SystemMemoryLinuxSlabStateReclaimable = SystemMemoryLinuxSlabStateKey.String("reclaimable")
+ // unreclaimable
+ // Stability: development
+ SystemMemoryLinuxSlabStateUnreclaimable = SystemMemoryLinuxSlabStateKey.String("unreclaimable")
+)
+
+// Enum values for system.memory.state
+var (
+ // Actual used virtual memory in bytes.
+ // Stability: development
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ // Stability: development
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // buffers
+ // Stability: development
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ // Stability: development
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Enum values for system.paging.direction
+var (
+ // in
+ // Stability: development
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ // Stability: development
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+// Enum values for system.paging.fault.type
+var (
+ // major
+ // Stability: development
+ SystemPagingFaultTypeMajor = SystemPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ SystemPagingFaultTypeMinor = SystemPagingFaultTypeKey.String("minor")
+)
+
+// Enum values for system.paging.state
+var (
+ // used
+ // Stability: development
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ // Stability: development
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+// Namespace: telemetry
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of the
+ // auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "parts-unlimited-java"
+ // Note: Official auto instrumentation agents and distributions SHOULD set the
+ // `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the language of
+ // the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "opentelemetry"
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to
+ // `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is used,
+ // this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module name of
+ // this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.2.3"
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version string
+// of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// Enum values for telemetry.sdk.language
+var (
+ // cpp
+ // Stability: stable
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ // Stability: stable
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ // Stability: stable
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ // Stability: stable
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ // Stability: stable
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ // Stability: stable
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ // Stability: stable
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ // Stability: stable
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ // Stability: stable
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ // Stability: stable
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ // Stability: stable
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ // Stability: stable
+ TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// Namespace: test
+const (
+ // TestCaseNameKey is the attribute Key conforming to the "test.case.name"
+ // semantic conventions. It represents the fully qualified human readable name
+ // of the [test case].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1",
+ // "ExampleTestCase1_test1"
+ //
+ // [test case]: https://wikipedia.org/wiki/Test_case
+ TestCaseNameKey = attribute.Key("test.case.name")
+
+ // TestCaseResultStatusKey is the attribute Key conforming to the
+ // "test.case.result.status" semantic conventions. It represents the status of
+ // the actual test case result from test execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pass", "fail"
+ TestCaseResultStatusKey = attribute.Key("test.case.result.status")
+
+ // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name"
+ // semantic conventions. It represents the human readable name of a
+ // [test suite].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TestSuite1"
+ //
+ // [test suite]: https://wikipedia.org/wiki/Test_suite
+ TestSuiteNameKey = attribute.Key("test.suite.name")
+
+ // TestSuiteRunStatusKey is the attribute Key conforming to the
+ // "test.suite.run.status" semantic conventions. It represents the status of the
+ // test suite run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "skipped", "aborted", "timed_out",
+ // "in_progress"
+ TestSuiteRunStatusKey = attribute.Key("test.suite.run.status")
+)
+
+// TestCaseName returns an attribute KeyValue conforming to the "test.case.name"
+// semantic conventions. It represents the fully qualified human readable name of
+// the [test case].
+//
+// [test case]: https://wikipedia.org/wiki/Test_case
+func TestCaseName(val string) attribute.KeyValue {
+ return TestCaseNameKey.String(val)
+}
+
+// TestSuiteName returns an attribute KeyValue conforming to the
+// "test.suite.name" semantic conventions. It represents the human readable name
+// of a [test suite].
+//
+// [test suite]: https://wikipedia.org/wiki/Test_suite
+func TestSuiteName(val string) attribute.KeyValue {
+ return TestSuiteNameKey.String(val)
+}
+
+// Enum values for test.case.result.status
+var (
+ // pass
+ // Stability: development
+ TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass")
+ // fail
+ // Stability: development
+ TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail")
+)
+
+// Enum values for test.suite.run.status
+var (
+ // success
+ // Stability: development
+ TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success")
+ // failure
+ // Stability: development
+ TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure")
+ // skipped
+ // Stability: development
+ TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped")
+ // aborted
+ // Stability: development
+ TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted")
+ // timed_out
+ // Stability: development
+ TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out")
+ // in_progress
+ // Stability: development
+ TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress")
+)
+
+// Namespace: thread
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed to OS
+ // thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note:
+ // Examples of where the value can be extracted from:
+ //
+ // | Language or platform | Source |
+ // | --- | --- |
+ // | JVM | `Thread.currentThread().threadId()` |
+ // | .NET | `Thread.CurrentThread.ManagedThreadId` |
+ // | Python | `threading.current_thread().ident` |
+ // | Ruby | `Thread.current.object_id` |
+ // | C++ | `std::this_thread::get_id()` |
+ // | Erlang | `erlang:self()` |
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+ // conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: main
+ // Note:
+ // Examples of where the value can be extracted from:
+ //
+ // | Language or platform | Source |
+ // | --- | --- |
+ // | JVM | `Thread.currentThread().getName()` |
+ // | .NET | `Thread.CurrentThread.Name` |
+ // | Python | `threading.current_thread().name` |
+ // | Ruby | `Thread.current.name` |
+ // | Erlang | `erlang:process_info(self(), registered_name)` |
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
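+
+// The snippet below is an illustrative sketch, not part of the generated
+// conventions: it groups the thread helpers above into a single attribute
+// slice, as an instrumentation might when recording which "managed" thread
+// handled an operation. The function name is hypothetical.
+func exampleThreadAttributes(id int, name string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ThreadID(id),     // the managed thread ID, not the OS thread ID
+  ThreadName(name), // e.g. "main"
+ }
+}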
+
+// Namespace: tls
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+ // conventions. It represents the string indicating the [cipher] used during the
+ // current connection.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+ // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+ // of the [registered TLS Cipher Suites].
+ //
+ // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+ // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the client. This is usually
+ // mutually-exclusive of `client.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // client. This is usually mutually-exclusive of `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the date/Time
+ // indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com"
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the array
+ // of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the given
+ // cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "secp256r1"
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the "tls.established"
+ // semantic conventions. It represents the boolean flag indicating if the TLS
+ // negotiation was successful and transitioned to an encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol"
+ // semantic conventions. It represents the string indicating the protocol being
+ // tunneled. Per the values in the [IANA registry], this string should be lower
+ // case.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "http/1.1"
+ //
+ // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name"
+ // semantic conventions. It represents the normalized lowercase protocol name
+ // parsed from original string of the negotiated [SSL/TLS protocol version].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric part
+ // of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol version].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2", "3"
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic
+ // conventions. It represents the boolean flag indicating if this TLS connection
+ // was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the server. This is usually
+ // mutually-exclusive of `server.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // server. This is usually mutually-exclusive of `server.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+ // semantic conventions. It represents a hash that identifies servers based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the date/Time
+ // indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com"
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the [cipher] used
+// during the current connection.
+//
+// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also exists
+// in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// client. This is usually mutually-exclusive of `client.certificate` since that
+// value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the client. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the client.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3"
+// semantic conventions. It represents a hash that identifies clients based on
+// how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic
+// conventions. It represents the string indicating the curve used for the given
+// cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string indicating
+// the protocol being tunneled. Per the values in the [IANA registry], this
+// string should be lower case.
+//
+// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Enum values for tls.protocol.name
+var (
+ // ssl
+ // Stability: development
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ // Stability: development
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
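+
+// A hedged example, not part of the generated conventions: it sketches how a
+// client instrumentation might describe a finished handshake using the TLS
+// helpers and enum values above. The function name and parameter values are
+// illustrative only.
+func exampleTLSHandshakeAttributes(cipher, version string, resumed bool) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  TLSCipher(cipher),           // e.g. "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+  TLSProtocolNameTLS,          // "tls.protocol.name" enum value
+  TLSProtocolVersion(version), // e.g. "1.2"
+  TLSEstablished(true),        // negotiation succeeded and the tunnel is encrypted
+  TLSResumed(resumed),         // whether the connection resumed a prior negotiation
+ }
+}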
+
+// Namespace: url
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain" semantic
+ // conventions. It represents the domain extracted from the `url.full`, such as
+ // "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2",
+ // "[1080:0:0:0:8:800:200C:417A]"
+ // Note: In some cases a URL may refer to an IP and/or port directly, without a
+ // domain name. In this case, the IP address would go to the domain field. If
+ // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[`
+ // and `]` characters should also be captured in the domain field.
+ //
+ // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from the
+ // `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: The file extension is only set if it exists, as not every url has a
+ // file extension. When the file name has multiple extensions `example.tar.gz`,
+ // only the last one should be captured `gz`, not `tar.gz`.
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic
+ // conventions. It represents the [URI fragment] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SemConv"
+ //
+ // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network resource
+ // according to [RFC3986].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost"
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the fragment
+ // is not transmitted over HTTP, but if it is known, it SHOULD be included
+ // nevertheless.
+ //
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`.
+ // In such case username and password SHOULD be redacted and attribute's value
+ // SHOULD be `https://REDACTED:REDACTED@www.example.com/`.
+ //
+ // `url.full` SHOULD capture the absolute URL when it is available (or can be
+ // reconstructed).
+ //
+ // Sensitive content provided in `url.full` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the
+ // value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `https://www.example.com/path?color=blue&sig=REDACTED`.
+ //
+ // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original" semantic
+ // conventions. It represents the unmodified original URL as seen in the event
+ // source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv",
+ // "search?q=OpenTelemetry"
+ // Note: In network monitoring, the observed URL may be a full URL, whereas in
+ // access logs, the URL is often just represented as a path. This field is meant
+ // to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case password and
+ // username SHOULD NOT be redacted and attribute's value SHOULD remain the same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI path] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/search"
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI query] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "q=OpenTelemetry"
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `q=OpenTelemetry&sig=REDACTED`.
+ //
+ // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.com", "foo.co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ // For example, the registered domain for `foo.example.com` is `example.com`.
+ // Trying to approximate this by simply taking the last two labels will not work
+ // well for TLDs such as `co.uk`.
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+ // conventions. It represents the [URI scheme] component identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https", "ftp", "telnet"
+ //
+ // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host name
+ // under the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain contains
+ // all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "east", "sub2.sub1"
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+ // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+ // subdomain field should contain `sub2.sub1`, with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+ // conventions. It represents the low-cardinality template of an
+ // [absolute path reference].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+ //
+ // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective top
+ // level domain (eTLD), also known as the domain suffix, is the last part of the
+ // domain name. For example, the top level domain for example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com", "co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name, which includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, is the last part of the
+// domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
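+
+// The following is an illustrative sketch, not part of the generated
+// conventions: it decomposes one of the example URLs from the docs above into
+// the corresponding url.* attributes. The function name is hypothetical and
+// the values are examples only.
+func exampleURLAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  URLFull("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
+  URLScheme("https"),
+  URLDomain("www.foo.bar"),
+  URLPath("/search"),
+  URLQuery("q=OpenTelemetry"),
+  URLFragment("SemConv"),
+  URLPort(443),
+ }
+}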
+
+// Namespace: user
+const (
+ // UserEmailKey is the attribute Key conforming to the "user.email" semantic
+ // conventions. It represents the user email address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein@example.com"
+ UserEmailKey = attribute.Key("user.email")
+
+ // UserFullNameKey is the attribute Key conforming to the "user.full_name"
+ // semantic conventions. It represents the user's full name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Albert Einstein"
+ UserFullNameKey = attribute.Key("user.full_name")
+
+ // UserHashKey is the attribute Key conforming to the "user.hash" semantic
+ // conventions. It represents the unique user hash to correlate information for
+ // a user in anonymized form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+ // Note: Useful if `user.id` or `user.name` contain confidential information and
+ // cannot be used.
+ UserHashKey = attribute.Key("user.hash")
+
+ // UserIDKey is the attribute Key conforming to the "user.id" semantic
+ // conventions. It represents the unique identifier of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000"
+ UserIDKey = attribute.Key("user.id")
+
+ // UserNameKey is the attribute Key conforming to the "user.name" semantic
+ // conventions. It represents the short name or login/username of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein"
+ UserNameKey = attribute.Key("user.name")
+
+ // UserRolesKey is the attribute Key conforming to the "user.roles" semantic
+ // conventions. It represents the array of user roles at the time of the event.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "admin", "reporting_user"
+ UserRolesKey = attribute.Key("user.roles")
+)
+
+// UserEmail returns an attribute KeyValue conforming to the "user.email"
+// semantic conventions. It represents the user email address.
+func UserEmail(val string) attribute.KeyValue {
+ return UserEmailKey.String(val)
+}
+
+// UserFullName returns an attribute KeyValue conforming to the "user.full_name"
+// semantic conventions. It represents the user's full name.
+func UserFullName(val string) attribute.KeyValue {
+ return UserFullNameKey.String(val)
+}
+
+// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic
+// conventions. It represents the unique user hash to correlate information for a
+// user in anonymized form.
+func UserHash(val string) attribute.KeyValue {
+ return UserHashKey.String(val)
+}
+
+// UserID returns an attribute KeyValue conforming to the "user.id" semantic
+// conventions. It represents the unique identifier of the user.
+func UserID(val string) attribute.KeyValue {
+ return UserIDKey.String(val)
+}
+
+// UserName returns an attribute KeyValue conforming to the "user.name" semantic
+// conventions. It represents the short name or login/username of the user.
+func UserName(val string) attribute.KeyValue {
+ return UserNameKey.String(val)
+}
+
+// UserRoles returns an attribute KeyValue conforming to the "user.roles"
+// semantic conventions. It represents the array of user roles at the time of the
+// event.
+func UserRoles(val ...string) attribute.KeyValue {
+ return UserRolesKey.StringSlice(val)
+}
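+
+// A brief usage sketch, not part of the generated conventions: it combines
+// the user helpers above to describe the end user associated with an event,
+// reusing the example values from the docs. The function name is hypothetical.
+func exampleUserAttributes() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  UserID("S-1-5-21-202424912787-2692429404-2351956786-1000"),
+  UserName("a.einstein"),
+  UserEmail("a.einstein@example.com"),
+  UserRoles("admin", "reporting_user"),
+ }
+}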
+
+// Namespace: user_agent
+const (
+ // UserAgentNameKey is the attribute Key conforming to the "user_agent.name"
+ // semantic conventions. It represents the name of the user-agent extracted from
+ // original. Usually refers to the browser's name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Safari", "YourApp"
+ // Note: [Example] of extracting the browser's name from the original string.
+ // In the case of using a user-agent for non-browser products, such as
+ // microservices with multiple names/versions inside the `user_agent.original`,
+ // the most significant name SHOULD be selected. In such a scenario it should
+ // align with `user_agent.version`.
+ //
+ // [Example]: https://uaparser.dev/#demo
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of the
+ // [HTTP User-Agent] header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2"
+ //
+ // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentOSNameKey is the attribute Key conforming to the
+ // "user_agent.os.name" semantic conventions. It represents the human readable
+ // operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ // Note: For mapping user agent strings to OS names, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+ // UserAgentOSVersionKey is the attribute Key conforming to the
+ // "user_agent.os.version" semantic conventions. It represents the version
+ // string of the operating system as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ // Note: For mapping user agent strings to OS versions, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+ // UserAgentSyntheticTypeKey is the attribute Key conforming to the
+ // "user_agent.synthetic.type" semantic conventions. It represents the specifies
+ // the category of synthetic traffic, such as tests or bots.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute MAY be derived from the contents of the
+ // `user_agent.original` attribute. Components that populate the attribute are
+ // responsible for determining what they consider to be synthetic bot or test
+ // traffic. This attribute can either be set for self-identification purposes,
+ // or on telemetry detected to be generated as a result of a synthetic request.
+ // This attribute is useful for distinguishing between genuine client traffic
+ // and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of the
+ // user-agent extracted from original. Usually refers to the browser's version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.1.2", "1.0.0"
+ // Note: [Example] of extracting the browser's version from the original
+ // string. In the case of using a user-agent for non-browser products, such as
+ // microservices with multiple names/versions inside the `user_agent.original`,
+ // the most significant version SHOULD be selected. In such a scenario it
+ // should align with `user_agent.name`.
+ //
+ // [Example]: https://uaparser.dev/#demo
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP User-Agent] header sent by the client.
+//
+// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentOSName returns an attribute KeyValue conforming to the
+// "user_agent.os.name" semantic conventions. It represents the human readable
+// operating system name.
+func UserAgentOSName(val string) attribute.KeyValue {
+ return UserAgentOSNameKey.String(val)
+}
+
+// UserAgentOSVersion returns an attribute KeyValue conforming to the
+// "user_agent.os.version" semantic conventions. It represents the version string
+// of the operating system as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func UserAgentOSVersion(val string) attribute.KeyValue {
+ return UserAgentOSVersionKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
+
+// Enum values for user_agent.synthetic.type
+var (
+ // Bot source.
+ // Stability: development
+ UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot")
+ // Synthetic test source.
+ // Stability: development
+ UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test")
+)
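+
+// The example below is a hedged sketch, not part of the generated
+// conventions: it records the raw User-Agent header alongside the parsed
+// name/version and flags the traffic as synthetic bot traffic. The function
+// name is hypothetical and the parsed values are illustrative only.
+func exampleUserAgentAttributes(original string) []attribute.KeyValue {
+ return []attribute.KeyValue{
+  UserAgentOriginal(original), // raw HTTP User-Agent header value
+  UserAgentName("YourApp"),    // most significant product name
+  UserAgentVersion("1.0.0"),   // version aligned with user_agent.name
+  UserAgentSyntheticTypeBot,   // "user_agent.synthetic.type" enum value
+ }
+}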
+
+// Namespace: vcs
+const (
+ // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id"
+ // semantic conventions. It represents the ID of the change (pull request/merge
+ // request/changelist) if applicable. This is usually a unique (within
+ // repository) identifier generated by the VCS system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ VCSChangeIDKey = attribute.Key("vcs.change.id")
+
+ // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state"
+ // semantic conventions. It represents the state of the change (pull
+ // request/merge request/changelist).
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "open", "closed", "merged"
+ VCSChangeStateKey = attribute.Key("vcs.change.state")
+
+ // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title"
+ // semantic conventions. It represents the human readable title of the change
+ // (pull request/merge request/changelist). This title is often a brief summary
+ // of the change and may get merged into a ref as the commit summary.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update
+ // dependency"
+ VCSChangeTitleKey = attribute.Key("vcs.change.title")
+
+ // VCSLineChangeTypeKey is the attribute Key conforming to the
+ // "vcs.line_change.type" semantic conventions. It represents the type of line
+ // change being measured on a branch or change.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "added", "removed"
+ VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+ // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+ // semantic conventions. It represents the group owner within the version
+ // control system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org", "myteam", "business-unit"
+ VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+ // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+ // semantic conventions. It represents the name of the version control system
+ // provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "github", "gitlab", "gitea", "bitbucket"
+ VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+ // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+ // VCSRefBaseRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.base.revision" semantic conventions. It represents the revision,
+ // literally the [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits. The revision can
+ // be a full [hash value (see glossary)] of the recorded change to a ref within
+ // a repository pointing to a [commit] object. It does not necessarily have to
+ // be a hash; it can simply be a [revision number], an integer that is
+ // monotonically increasing. In cases where it is identical to the
+ // `ref.base.name`, it SHOULD still be included. It is up to the implementer to
+ // decide which value to set as the revision based on the VCS system and
+ // situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+ // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+ // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+ // VCSRefHeadRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.head.revision" semantic conventions. It represents the revision,
+ // literally the [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time. The revision can be a full [hash value (see glossary)] of the
+ // recorded change to a ref within a repository pointing to a [commit] object.
+ // It does not necessarily have to be a hash; it can simply be a
+ // [revision number], an integer that is monotonically increasing. In cases
+ // where it is identical to the `ref.head.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the revision
+ // based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+ // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+ // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+ // conventions. It represents the type of the [reference] in the repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefTypeKey = attribute.Key("vcs.ref.type")
+
+ // VCSRepositoryNameKey is the attribute Key conforming to the
+ // "vcs.repository.name" semantic conventions. It represents the human readable
+ // name of the repository. It SHOULD NOT include any additional identifier like
+ // Group/SubGroup in GitLab or organization in GitHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "semantic-conventions", "my-cool-repo"
+ // Note: Because this is only the repository name, it can clash with forks of
+ // the same repository when telemetry is collected across multiple orgs or
+ // groups into the same backend.
+ VCSRepositoryNameKey = attribute.Key("vcs.repository.name")
+
+ // VCSRepositoryURLFullKey is the attribute Key conforming to the
+ // "vcs.repository.url.full" semantic conventions. It represents the
+ // [canonical URL] of the repository providing the complete HTTP(S) address in
+ // order to locate and identify the repository through a browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/opentelemetry/open-telemetry-collector-contrib",
+ // "https://gitlab.com/my-org/my-project/my-projects-project/repo"
+ // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include
+ // the `.git` extension.
+ //
+ // [canonical URL]: https://support.google.com/webmasters/answer/10347851
+ VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full")
+
+ // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the
+ // "vcs.revision_delta.direction" semantic conventions. It represents the type
+ // of revision comparison.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ahead", "behind"
+ VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction")
+)
+
+// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id"
+// semantic conventions. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func VCSChangeID(val string) attribute.KeyValue {
+ return VCSChangeIDKey.String(val)
+}
+
+// VCSChangeTitle returns an attribute KeyValue conforming to the
+// "vcs.change.title" semantic conventions. It represents the human readable
+// title of the change (pull request/merge request/changelist). This title is
+// often a brief summary of the change and may get merged in to a ref as the
+// commit summary.
+func VCSChangeTitle(val string) attribute.KeyValue {
+ return VCSChangeTitleKey.String(val)
+}
+
+// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name"
+// semantic conventions. It represents the group owner within the version control
+// system.
+func VCSOwnerName(val string) attribute.KeyValue {
+ return VCSOwnerNameKey.String(val)
+}
+
+// VCSRefBaseName returns an attribute KeyValue conforming to the
+// "vcs.ref.base.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefBaseName(val string) attribute.KeyValue {
+ return VCSRefBaseNameKey.String(val)
+}
+
+// VCSRefBaseRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefBaseRevision(val string) attribute.KeyValue {
+ return VCSRefBaseRevisionKey.String(val)
+}
+
+// VCSRefHeadName returns an attribute KeyValue conforming to the
+// "vcs.ref.head.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefHeadName(val string) attribute.KeyValue {
+ return VCSRefHeadNameKey.String(val)
+}
+
+// VCSRefHeadRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefHeadRevision(val string) attribute.KeyValue {
+ return VCSRefHeadRevisionKey.String(val)
+}
+
+// VCSRepositoryName returns an attribute KeyValue conforming to the
+// "vcs.repository.name" semantic conventions. It represents the human readable
+// name of the repository. It SHOULD NOT include any additional identifier like
+// Group/SubGroup in GitLab or organization in GitHub.
+func VCSRepositoryName(val string) attribute.KeyValue {
+ return VCSRepositoryNameKey.String(val)
+}
+
+// VCSRepositoryURLFull returns an attribute KeyValue conforming to the
+// "vcs.repository.url.full" semantic conventions. It represents the
+// [canonical URL] of the repository providing the complete HTTP(S) address in
+// order to locate and identify the repository through a browser.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851
+func VCSRepositoryURLFull(val string) attribute.KeyValue {
+ return VCSRepositoryURLFullKey.String(val)
+}
+
+// Enum values for vcs.change.state
+var (
+ // Open means the change is currently active and under review. It hasn't been
+ // merged into the target branch yet, and it's still possible to make changes or
+ // add comments.
+ // Stability: development
+ VCSChangeStateOpen = VCSChangeStateKey.String("open")
+ // WIP (work-in-progress, draft) means the change is still in progress and not
+ // yet ready for a full review. It might still undergo significant changes.
+ // Stability: development
+ VCSChangeStateWip = VCSChangeStateKey.String("wip")
+ // Closed means the merge request has been closed without merging. This can
+ // happen for various reasons, such as the changes being deemed unnecessary, the
+ // issue being resolved in another way, or the author deciding to withdraw the
+ // request.
+ // Stability: development
+ VCSChangeStateClosed = VCSChangeStateKey.String("closed")
+ // Merged indicates that the change has been successfully integrated into the
+ // target codebase.
+ // Stability: development
+ VCSChangeStateMerged = VCSChangeStateKey.String("merged")
+)
+
+// Enum values for vcs.line_change.type
+var (
+ // How many lines were added.
+ // Stability: development
+ VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added")
+ // How many lines were removed.
+ // Stability: development
+ VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed")
+)
+
+// Enum values for vcs.provider.name
+var (
+ // [GitHub]
+ // Stability: development
+ //
+ // [GitHub]: https://github.com
+ VCSProviderNameGithub = VCSProviderNameKey.String("github")
+ // [GitLab]
+ // Stability: development
+ //
+ // [GitLab]: https://gitlab.com
+ VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab")
+ // [Gitea]
+ // Stability: development
+ //
+ // [Gitea]: https://gitea.io
+ VCSProviderNameGitea = VCSProviderNameKey.String("gitea")
+ // [Bitbucket]
+ // Stability: development
+ //
+ // [Bitbucket]: https://bitbucket.org
+ VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket")
+)
+
+// Enum values for vcs.ref.base.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.head.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefTypeBranch = VCSRefTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefTypeTag = VCSRefTypeKey.String("tag")
+)
+
+// Enum values for vcs.revision_delta.direction
+var (
+ // How many revisions the change is behind the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind")
+ // How many revisions the change is ahead of the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead")
+)
+
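+// Illustrative usage sketch (not part of the generated conventions): the VCS
+// helper functions and enum values above all yield attribute.KeyValue pairs, so
+// a caller importing this package as semconv could attach them to a span or
+// resource like this (span is any trace.Span; the values echo the examples
+// documented above):
+//
+//	span.SetAttributes(
+//		semconv.VCSRepositoryName("my-cool-repo"),
+//		semconv.VCSRefHeadName("my-feature-branch"),
+//		semconv.VCSRefHeadTypeBranch,
+//		semconv.VCSRefHeadRevision("9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc"),
+//	)
+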
+// Namespace: webengine
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the additional
+ // description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final"
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly"
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of the
+ // web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "21.0.0"
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// Namespace: zos
+const (
+ // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic
+ // conventions. It represents the System Management Facility (SMF) identifier
+ // that uniquely identifies a z/OS system within a SYSPLEX or mainframe
+ // environment and is used for system and performance analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYS1"
+ ZOSSmfIDKey = attribute.Key("zos.smf.id")
+
+ // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name"
+ // semantic conventions. It represents the name of the SYSPLEX to which the z/OS
+ // system belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYSPLEX1"
+ ZOSSysplexNameKey = attribute.Key("zos.sysplex.name")
+)
+
+// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic
+// conventions. It represents the System Management Facility (SMF) identifier
+// that uniquely identifies a z/OS system within a SYSPLEX or mainframe
+// environment and is used for system and performance analysis.
+func ZOSSmfID(val string) attribute.KeyValue {
+ return ZOSSmfIDKey.String(val)
+}
+
+// ZOSSysplexName returns an attribute KeyValue conforming to the
+// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX
+// to which the z/OS system belongs.
+func ZOSSysplexName(val string) attribute.KeyValue {
+ return ZOSSysplexNameKey.String(val)
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go
similarity index 80%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go
index d031bbea..852362ef 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/doc.go
@@ -4,6 +4,6 @@
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.26.0
+// patterns for OpenTelemetry things. This package represents the v1.39.0
// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go
new file mode 100644
index 00000000..84cf636a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/error_type.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
+
+import (
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+//
+// If err is nil, the returned attribute has the default value
+// [ErrorTypeOther].
+//
+// If err's type has the method
+//
+// ErrorType() string
+//
+// then the returned attribute has the value of err.ErrorType(). Otherwise, the
+// returned attribute has a value derived from the concrete type of err.
+//
+// The key of the returned attribute is [ErrorTypeKey].
+func ErrorType(err error) attribute.KeyValue {
+ if err == nil {
+ return ErrorTypeOther
+ }
+
+ return ErrorTypeKey.String(errorType(err))
+}
+
+func errorType(err error) string {
+ var s string
+ if et, ok := err.(interface{ ErrorType() string }); ok {
+ // Prioritize the ErrorType method if available.
+ s = et.ErrorType()
+ }
+ if s == "" {
+ // Fallback to reflection if the ErrorType method is not supported or
+ // returns an empty value.
+
+ t := reflect.TypeOf(err)
+ pkg, name := t.PkgPath(), t.Name()
+ if pkg != "" && name != "" {
+ s = pkg + "." + name
+ } else {
+ // The type has no package path or name (predeclared, not-defined,
+ // or alias for a not-defined type).
+ //
+ // This is not guaranteed to be unique, but is a best effort.
+ s = t.String()
+ }
+ }
+ return s
+}
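+
+// Illustrative usage sketch (not part of the generated file): instrumentation
+// code that imports this package as semconv can record the returned attribute
+// on a span when an operation fails. doWork and span are placeholders for the
+// caller's own function and trace.Span; codes is go.opentelemetry.io/otel/codes.
+//
+//	if err := doWork(ctx); err != nil {
+//		span.SetAttributes(semconv.ErrorType(err))
+//		span.SetStatus(codes.Error, err.Error())
+//	}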
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go
similarity index 74%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go
index bfaee0d5..7b688ecc 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/exception.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/otelconv/metric.go
similarity index 89%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.39.0/otelconv/metric.go
index a78eafd1..901da869 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/otelconv/metric.go
@@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Package httpconv provides types and functionality for OpenTelemetry semantic
+// Package otelconv provides types and functionality for OpenTelemetry semantic
// conventions in the "otel" namespace.
package otelconv
@@ -122,48 +122,6 @@ var (
SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE"
)
-// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code
-// semantic conventions. It represents the gRPC status code of the last gRPC
-// requests performed in scope of this export call.
-type RPCGRPCStatusCodeAttr int64
-
-var (
- // RPCGRPCStatusCodeOk is the OK.
- RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0
- // RPCGRPCStatusCodeCancelled is the CANCELLED.
- RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1
- // RPCGRPCStatusCodeUnknown is the UNKNOWN.
- RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2
- // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT.
- RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3
- // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED.
- RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4
- // RPCGRPCStatusCodeNotFound is the NOT_FOUND.
- RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5
- // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS.
- RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6
- // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED.
- RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7
- // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED.
- RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8
- // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION.
- RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9
- // RPCGRPCStatusCodeAborted is the ABORTED.
- RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10
- // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE.
- RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11
- // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED.
- RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12
- // RPCGRPCStatusCodeInternal is the INTERNAL.
- RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13
- // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE.
- RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14
- // RPCGRPCStatusCodeDataLoss is the DATA_LOSS.
- RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15
- // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED.
- RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16
-)
-
// SDKExporterLogExported is an instrument used to record metric values
// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It
// represents the number of log records for which the export has finished, either
@@ -172,6 +130,11 @@ type SDKExporterLogExported struct {
metric.Int64Counter
}
+var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument.
func NewSDKExporterLogExported(
m metric.Meter,
@@ -182,15 +145,18 @@ func NewSDKExporterLogExported(
return SDKExporterLogExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterLogExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterLogExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.log.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterLogExported{noop.Int64Counter{}}, err
+ return SDKExporterLogExported{noop.Int64Counter{}}, err
}
return SDKExporterLogExported{i}, nil
}
@@ -319,6 +285,11 @@ type SDKExporterLogInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument.
func NewSDKExporterLogInflight(
m metric.Meter,
@@ -329,15 +300,18 @@ func NewSDKExporterLogInflight(
return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterLogInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterLogInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.log.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterLogInflight{i}, nil
}
@@ -449,6 +423,11 @@ type SDKExporterMetricDataPointExported struct {
metric.Int64Counter
}
+var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
+ metric.WithUnit("{data_point}"),
+}
+
// NewSDKExporterMetricDataPointExported returns a new
// SDKExporterMetricDataPointExported instrument.
func NewSDKExporterMetricDataPointExported(
@@ -460,15 +439,18 @@ func NewSDKExporterMetricDataPointExported(
return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterMetricDataPointExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterMetricDataPointExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.metric_data_point.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
- metric.WithUnit("{data_point}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
+ return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
}
return SDKExporterMetricDataPointExported{i}, nil
}
@@ -598,6 +580,11 @@ type SDKExporterMetricDataPointInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{data_point}"),
+}
+
// NewSDKExporterMetricDataPointInflight returns a new
// SDKExporterMetricDataPointInflight instrument.
func NewSDKExporterMetricDataPointInflight(
@@ -609,15 +596,18 @@ func NewSDKExporterMetricDataPointInflight(
return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterMetricDataPointInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterMetricDataPointInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.metric_data_point.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{data_point}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterMetricDataPointInflight{i}, nil
}
@@ -728,6 +718,11 @@ type SDKExporterOperationDuration struct {
metric.Float64Histogram
}
+var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of exporting a batch of telemetry records."),
+ metric.WithUnit("s"),
+}
+
// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration
// instrument.
func NewSDKExporterOperationDuration(
@@ -739,15 +734,18 @@ func NewSDKExporterOperationDuration(
return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterOperationDurationOpts
+ } else {
+ opt = append(opt, newSDKExporterOperationDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"otel.sdk.exporter.operation.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of exporting a batch of telemetry records."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
+ return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
}
return SDKExporterOperationDuration{i}, nil
}
@@ -825,6 +823,7 @@ func (m SDKExporterOperationDuration) Record(
func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -865,11 +864,11 @@ func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) att
return attribute.String("otel.component.type", string(val))
}
-// AttrRPCGRPCStatusCode returns an optional attribute for the
-// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code
-// of the last gRPC requests performed in scope of this export call.
-func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue {
- return attribute.Int64("rpc.grpc.status_code", int64(val))
+// AttrRPCResponseStatusCode returns an optional attribute for the
+// "rpc.response.status_code" semantic convention. It represents the gRPC status
+// code of the last gRPC request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrRPCResponseStatusCode(val string) attribute.KeyValue {
+ return attribute.String("rpc.response.status_code", val)
}
// AttrServerAddress returns an optional attribute for the "server.address"
@@ -893,6 +892,11 @@ type SDKExporterSpanExported struct {
metric.Int64Counter
}
+var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument.
func NewSDKExporterSpanExported(
m metric.Meter,
@@ -903,15 +907,18 @@ func NewSDKExporterSpanExported(
return SDKExporterSpanExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterSpanExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterSpanExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.span.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterSpanExported{noop.Int64Counter{}}, err
+ return SDKExporterSpanExported{noop.Int64Counter{}}, err
}
return SDKExporterSpanExported{i}, nil
}
@@ -1040,6 +1047,11 @@ type SDKExporterSpanInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
func NewSDKExporterSpanInflight(
m metric.Meter,
@@ -1050,15 +1062,18 @@ func NewSDKExporterSpanInflight(
return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterSpanInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterSpanInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.span.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterSpanInflight{i}, nil
}
@@ -1169,6 +1184,11 @@ type SDKLogCreated struct {
metric.Int64Counter
}
+var newSDKLogCreatedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKLogCreated returns a new SDKLogCreated instrument.
func NewSDKLogCreated(
m metric.Meter,
@@ -1179,15 +1199,18 @@ func NewSDKLogCreated(
return SDKLogCreated{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKLogCreatedOpts
+ } else {
+ opt = append(opt, newSDKLogCreatedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.log.created",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKLogCreated{noop.Int64Counter{}}, err
+ return SDKLogCreated{noop.Int64Counter{}}, err
}
return SDKLogCreated{i}, nil
}
@@ -1254,6 +1277,11 @@ type SDKMetricReaderCollectionDuration struct {
metric.Float64Histogram
}
+var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the collect operation of the metric reader."),
+ metric.WithUnit("s"),
+}
+
// NewSDKMetricReaderCollectionDuration returns a new
// SDKMetricReaderCollectionDuration instrument.
func NewSDKMetricReaderCollectionDuration(
@@ -1265,15 +1293,18 @@ func NewSDKMetricReaderCollectionDuration(
return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKMetricReaderCollectionDurationOpts
+ } else {
+ opt = append(opt, newSDKMetricReaderCollectionDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"otel.sdk.metric_reader.collection.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of the collect operation of the metric reader."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
+ return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
}
return SDKMetricReaderCollectionDuration{i}, nil
}
@@ -1343,6 +1374,7 @@ func (m SDKMetricReaderCollectionDuration) Record(
func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1384,6 +1416,11 @@ type SDKProcessorLogProcessed struct {
metric.Int64Counter
}
+var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument.
func NewSDKProcessorLogProcessed(
m metric.Meter,
@@ -1394,15 +1431,18 @@ func NewSDKProcessorLogProcessed(
return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogProcessedOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogProcessedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.processor.log.processed",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
+ return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorLogProcessed{i}, nil
}
@@ -1515,6 +1555,11 @@ type SDKProcessorLogQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity
// instrument.
func NewSDKProcessorLogQueueCapacity(
@@ -1526,15 +1571,18 @@ func NewSDKProcessorLogQueueCapacity(
return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogQueueCapacityOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogQueueCapacityOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.capacity",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueCapacity{i}, nil
}
@@ -1581,6 +1629,11 @@ type SDKProcessorLogQueueSize struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument.
func NewSDKProcessorLogQueueSize(
m metric.Meter,
@@ -1591,15 +1644,18 @@ func NewSDKProcessorLogQueueSize(
return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogQueueSizeOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogQueueSizeOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.size",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueSize{i}, nil
}
@@ -1646,6 +1702,11 @@ type SDKProcessorSpanProcessed struct {
metric.Int64Counter
}
+var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed
// instrument.
func NewSDKProcessorSpanProcessed(
@@ -1657,15 +1718,18 @@ func NewSDKProcessorSpanProcessed(
return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanProcessedOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanProcessedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.processor.span.processed",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
+ return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorSpanProcessed{i}, nil
}
@@ -1778,6 +1842,11 @@ type SDKProcessorSpanQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity
// instrument.
func NewSDKProcessorSpanQueueCapacity(
@@ -1789,15 +1858,18 @@ func NewSDKProcessorSpanQueueCapacity(
return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanQueueCapacityOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.capacity",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueCapacity{i}, nil
}
@@ -1844,6 +1916,11 @@ type SDKProcessorSpanQueueSize struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize
// instrument.
func NewSDKProcessorSpanQueueSize(
@@ -1855,15 +1932,18 @@ func NewSDKProcessorSpanQueueSize(
return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanQueueSizeOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanQueueSizeOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.size",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueSize{i}, nil
}
@@ -1910,6 +1990,11 @@ type SDKSpanLive struct {
metric.Int64UpDownCounter
}
+var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKSpanLive returns a new SDKSpanLive instrument.
func NewSDKSpanLive(
m metric.Meter,
@@ -1920,15 +2005,18 @@ func NewSDKSpanLive(
return SDKSpanLive{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKSpanLiveOpts
+ } else {
+ opt = append(opt, newSDKSpanLiveOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.span.live",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKSpanLive{noop.Int64UpDownCounter{}}, err
+ return SDKSpanLive{noop.Int64UpDownCounter{}}, err
}
return SDKSpanLive{i}, nil
}
@@ -2013,6 +2101,11 @@ type SDKSpanStarted struct {
metric.Int64Counter
}
+var newSDKSpanStartedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of created spans."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKSpanStarted returns a new SDKSpanStarted instrument.
func NewSDKSpanStarted(
m metric.Meter,
@@ -2023,15 +2116,18 @@ func NewSDKSpanStarted(
return SDKSpanStarted{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKSpanStartedOpts
+ } else {
+ opt = append(opt, newSDKSpanStartedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.span.started",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of created spans."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKSpanStarted{noop.Int64Counter{}}, err
+ return SDKSpanStarted{noop.Int64Counter{}}, err
}
return SDKSpanStarted{i}, nil
}
@@ -2123,4 +2219,4 @@ func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.K
// value of the sampler for this span.
func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
return attribute.String("otel.span.sampling_result", string(val))
-}
\ No newline at end of file
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go
similarity index 71%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go
index 4c87c7ad..e1a199d8 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.39.0/schema.go
@@ -1,9 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
+const SchemaURL = "https://opentelemetry.io/schemas/1.39.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
index 8763936a..604fdab4 100644
--- a/vendor/go.opentelemetry.io/otel/trace/auto.go
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/internal/telemetry"
)
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index aea11a2b..d9ecef1c 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -4,6 +4,7 @@
package trace // import "go.opentelemetry.io/otel/trace"
import (
+ "slices"
"time"
"go.opentelemetry.io/otel/attribute"
@@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption {
})
}
-// WithInstrumentationAttributes sets the instrumentation attributes.
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+ // NewMergeIterator uses the first value for any duplicates.
+ iter := attribute.NewMergeIterator(&b, &a)
+ merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for iter.Next() {
+ merged = append(merged, iter.Attribute())
+ }
+ return attribute.NewSet(merged...)
+}
+
+// WithInstrumentationAttributes adds the instrumentation attributes.
//
-// The passed attributes will be de-duplicated.
+// This is equivalent to calling [WithInstrumentationAttributeSet] with an
+// [attribute.Set] created from a clone of the passed attributes.
+// [WithInstrumentationAttributeSet] is recommended for more control.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+ set := attribute.NewSet(slices.Clone(attr)...)
+ return WithInstrumentationAttributeSet(set)
+}
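+
+// Illustrative merge example (caller-side sketch, not upstream documentation):
+// because duplicate keys take the last value passed, the tracer below ends up
+// with env=prod. tp is assumed to be a trace.TracerProvider.
+//
+//	tracer := tp.Tracer("example",
+//		trace.WithInstrumentationAttributes(attribute.String("env", "dev")),
+//		trace.WithInstrumentationAttributeSet(
+//			attribute.NewSet(attribute.String("env", "prod")),
+//		),
+//	)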
+
+// WithInstrumentationAttributeSet adds the instrumentation attributes.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
+func WithInstrumentationAttributeSet(set attribute.Set) TracerOption {
+ if set.Len() == 0 {
+ return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+ return config
+ })
+ }
+
return tracerOptionFunc(func(config TracerConfig) TracerConfig {
- config.attrs = attribute.NewSet(attr...)
+ if config.attrs.Len() == 0 {
+ config.attrs = set
+ } else {
+ config.attrs = mergeSets(config.attrs, set)
+ }
return config
})
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
index d3aa476e..d01e7936 100644
--- a/vendor/go.opentelemetry.io/otel/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -66,6 +66,10 @@ type Span interface {
// SetAttributes sets kv as attributes of the Span. If a key from kv
// already exists for an attribute of the Span it will be overwritten with
// the value contained in kv.
+ //
+ // Note that adding attributes at span creation using [WithAttributes] is preferred
+ // to calling SetAttributes later, as samplers can only consider information
+ // already present during span creation.
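+ //
+ // For example (illustrative caller-side sketch), prefer
+ //
+ //	ctx, span := tracer.Start(ctx, "op", trace.WithAttributes(attribute.Bool("cache.hit", true)))
+ //
+ // over starting the span first and calling
+ // span.SetAttributes(attribute.Bool("cache.hit", true)) afterwards.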
SetAttributes(kv ...attribute.KeyValue)
// TracerProvider returns a TracerProvider that can be used to generate
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index bcaa5aa5..7c8f5080 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.38.0"
+ return "1.40.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 07145e25..9daa2df9 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.38.0
+ version: v1.40.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -22,11 +22,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.60.0
+ version: v0.62.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.14.0
+ version: v0.16.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
@@ -36,9 +36,31 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.13
+ version: v0.0.14
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- go.opentelemetry.io/otel/trace/internal/telemetry/test
+modules:
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/stdout/stdoutmetric:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/prometheus:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp:
+ version-refs:
+ - ./internal/version.go
diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
index a7c5d19b..1f8d49bc 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
@@ -34,7 +34,7 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
-// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// Represents any type of attribute value. AnyValue may contain a
// primitive value such as a string or integer or it may contain an arbitrary nested
// object containing arrays, key-value lists and primitives.
type AnyValue struct {
@@ -252,8 +252,10 @@ type KeyValueList struct {
// A collection of key/value pairs of key-value pairs. The list may be empty (may
// contain 0 elements).
+ //
// The keys MUST be unique (it is not allowed to have more than one
// value with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
}
@@ -296,14 +298,16 @@ func (x *KeyValueList) GetValues() []*KeyValue {
return nil
}
-// KeyValue is a key-value pair that is used to store Span attributes, Link
+// Represents a key-value pair that is used to store Span attributes, Link
// attributes, etc.
type KeyValue struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The key name of the pair.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value of the pair.
Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
@@ -360,14 +364,21 @@ type InstrumentationScope struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
+ // A name denoting the Instrumentation scope.
// An empty instrumentation scope name means the name is unknown.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Defines the version of the instrumentation scope.
+ // An empty instrumentation scope version means the version is unknown.
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// Additional attributes that describe the scope. [Optional].
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
- Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
- DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // The behavior of software that receives duplicated keys can be unpredictable.
+ Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // The number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
}
func (x *InstrumentationScope) Reset() {
diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
index ec187b13..748c9fb1 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
@@ -375,7 +375,8 @@ type ScopeMetrics struct {
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all metrics in the "metrics" field.
+ // This schema_url applies to the data in the "scope" field and all metrics in the
+ // "metrics" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
@@ -521,11 +522,11 @@ type Metric struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // name of the metric.
+ // The name of the metric.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // description of the metric, which can be used in documentation.
+ // A description of the metric, which can be used in documentation.
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- // unit in which the metric value is reported. Follows the format
+ // The unit in which the metric value is reported. Follows the format
// described by https://unitsofmeasure.org/ucum.html.
Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
// Data determines the aggregation type (if any) of the metric, what is the
@@ -546,6 +547,7 @@ type Metric struct {
// for lossless roundtrip translation to / from another data model.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Metadata []*v11.KeyValue `protobuf:"bytes,12,rep,name=metadata,proto3" json:"metadata,omitempty"`
}
@@ -699,6 +701,8 @@ type Gauge struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
+ // The time series data points.
+ // Note: Multiple time series may be included (same timestamp, different attributes).
DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}
@@ -748,11 +752,13 @@ type Sum struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
+ // The time series data points.
+ // Note: Multiple time series may be included (same timestamp, different attributes).
DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"`
- // If "true" means that the sum is monotonic.
+ // Represents whether the sum is monotonic.
IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
}
@@ -816,6 +822,8 @@ type Histogram struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
+ // The time series data points.
+ // Note: Multiple time series may be included (same timestamp, different attributes).
DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
@@ -875,6 +883,8 @@ type ExponentialHistogram struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
+ // The time series data points.
+ // Note: Multiple time series may be included (same timestamp, different attributes).
DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
// aggregation_temporality describes if the aggregator reports delta changes
// since last report time, or cumulative changes since a fixed start time.
@@ -941,6 +951,8 @@ type Summary struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
+ // The time series data points.
+ // Note: Multiple time series may be included (same timestamp, different attributes).
DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}
@@ -994,6 +1006,7 @@ type NumberDataPoint struct {
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
// StartTimeUnixNano is optional but strongly encouraged, see the
// the detailed comments above Metric.
@@ -1144,6 +1157,7 @@ type HistogramDataPoint struct {
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
// StartTimeUnixNano is optional but strongly encouraged, see the
// the detailed comments above Metric.
@@ -1331,6 +1345,7 @@ type ExponentialHistogramDataPoint struct {
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
// StartTimeUnixNano is optional but strongly encouraged, see the
// the detailed comments above Metric.
@@ -1343,11 +1358,11 @@ type ExponentialHistogramDataPoint struct {
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
// 1970.
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // count is the number of values in the population. Must be
+ // The number of values in the population. Must be
// non-negative. This value must be equal to the sum of the "bucket_counts"
// values in the positive and negative Buckets plus the "zero_count" field.
Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
- // sum of the values in the population. If count is zero then this field
+ // The sum of the values in the population. If count is zero then this field
// must be zero.
//
// Note: Sum should only be filled out when measuring non-negative discrete
@@ -1372,7 +1387,7 @@ type ExponentialHistogramDataPoint struct {
// scale is not restricted by the protocol, as the permissible
// values depend on the range of the data.
Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
- // zero_count is the count of values that are either exactly zero or
+ // The count of values that are either exactly zero or
// within the region considered zero by the instrumentation at the
// tolerated degree of precision. This bucket stores values that
// cannot be expressed using the standard exponential formula as
@@ -1391,9 +1406,9 @@ type ExponentialHistogramDataPoint struct {
// (Optional) List of exemplars collected from
// measurements that were used to form the data point
Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
- // min is the minimum value over (start_time, end_time].
+ // The minimum value over (start_time, end_time].
Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
- // max is the maximum value over (start_time, end_time].
+ // The maximum value over (start_time, end_time].
Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
// ZeroThreshold may be optionally set to convey the width of the zero
// region. Where the zero region is defined as the closed interval
@@ -1546,6 +1561,7 @@ type SummaryDataPoint struct {
// where this point belongs. The list may be empty (may contain 0 elements).
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
// StartTimeUnixNano is optional but strongly encouraged, see the
// the detailed comments above Metric.
@@ -1798,11 +1814,11 @@ type ExponentialHistogramDataPoint_Buckets struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Offset is the bucket index of the first entry in the bucket_counts array.
+ // The bucket index of the first entry in the bucket_counts array.
//
// Note: This uses a varint encoding as a simple form of compression.
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
- // bucket_counts is an array of count values, where bucket_counts[i] carries
+ // An array of count values, where bucket_counts[i] carries
// the count of the bucket at index (offset+i). bucket_counts[i] is the count
// of values greater than base^(offset+i) and less than or equal to
// base^(offset+i+1).
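Reviewer note: the regenerated comments repeatedly add the warning that duplicated attribute keys make receiver behavior unpredictable. For context, a minimal Go sketch of enforcing that invariant before filling fields such as Metric.Metadata; it assumes the generated common/v1 types (KeyValue, AnyValue) from go.opentelemetry.io/proto/otlp, which are not shown in this diff.

    package main

    import (
        "fmt"

        commonpb "go.opentelemetry.io/proto/otlp/common/v1"
    )

    // dedupeAttributes keeps the first occurrence of each key, matching the
    // "keys MUST be unique" requirement in the generated comments.
    func dedupeAttributes(attrs []*commonpb.KeyValue) []*commonpb.KeyValue {
        seen := make(map[string]bool, len(attrs))
        out := attrs[:0]
        for _, kv := range attrs {
            if kv == nil || seen[kv.Key] {
                continue
            }
            seen[kv.Key] = true
            out = append(out, kv)
        }
        return out
    }

    func main() {
        attrs := []*commonpb.KeyValue{
            {Key: "service.name", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "checkout"}}},
            {Key: "service.name", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "duplicate"}}},
        }
        fmt.Println(len(dedupeAttributes(attrs))) // 1
    }
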
diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
index eb7745d6..301247dd 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
@@ -44,8 +44,9 @@ type Resource struct {
// Set of attributes that describe the resource.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+ // The number of dropped attributes. If the value is 0, then
// no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
// Set of entities that participate in this Resource.
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
index b342a0a9..d7bfca90 100644
--- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -388,7 +388,8 @@ type ScopeSpans struct {
// is recorded in. Notably, the last part of the URL path is the version number of the
// schema: http[s]://server[:port]/path/. To learn more about Schema URL see
// https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
- // This schema_url applies to all spans and span events in the "spans" field.
+ // This schema_url applies to the data in the "scope" field and all spans and span
+ // events in the "spans" field.
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}
@@ -512,21 +513,21 @@ type Span struct {
// two spans with the same name may be distinguished using `CLIENT` (caller)
// and `SERVER` (callee) to identify queueing latency associated with the span.
Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
- // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // The start time of the span. On the client side, this is the time
// kept by the local machine where the span execution starts. On the server side, this
// is the time when the server's application handler starts running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
- // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // The end time of the span. On the client side, this is the time
// kept by the local machine where the span execution ends. On the server side, this
// is the time when the server application handler stops running.
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// This field is semantically required and it is expected that end_time >= start_time.
EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
- // attributes is a collection of key/value pairs. Note, global attributes
+ // A collection of key/value pairs. Note, global attributes
// like server name can be set using the resource API. Examples of attributes:
//
// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
@@ -534,24 +535,23 @@ type Span struct {
// "example.com/myattribute": true
// "example.com/score": 10.239
//
- // The OpenTelemetry API specification further restricts the allowed value types:
- // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // The number of attributes that were discarded. Attributes
// can be discarded because their keys are too long or because there are too many
// attributes. If this value is 0, then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
- // events is a collection of Event items.
+ // A collection of Event items.
Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
- // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // The number of dropped events. If the value is 0, then no
// events were dropped.
DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
- // links is a collection of Links, which are references from this span to a span
+ // A collection of Links, which are references from this span to a span
// in the same or different trace.
Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
- // dropped_links_count is the number of dropped links after the maximum size was
+ // The number of dropped links after the maximum size was
// enforced. If this value is 0, then no links were dropped.
DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
// An optional final status for this span. Semantically when Status isn't set, it means
@@ -769,16 +769,17 @@ type Span_Event struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // time_unix_nano is the time the event occurred.
+ // The time the event occurred.
TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
- // name of the event.
+ // The name of the event.
// This field is semantically required to be set to non-empty string.
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- // attributes is a collection of attribute key/value pairs on the event.
+ // A collection of attribute key/value pairs on the event.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // The number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
}
@@ -859,11 +860,12 @@ type Span_Link struct {
SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
// The trace_state associated with the link.
TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
- // attributes is a collection of attribute key/value pairs on the link.
+ // A collection of attribute key/value pairs on the link.
// Attribute keys MUST be unique (it is not allowed to have more than one
// attribute with the same key).
+ // The behavior of software that receives duplicated keys can be unpredictable.
Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
- // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // The number of dropped attributes. If the value is 0,
// then no attributes were dropped.
DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
// Flags, a bit field.
diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
index 7dd2638e..769af387 100644
--- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
+++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
@@ -29,7 +29,7 @@ loop:
MOVD $NUM_ROUNDS, R21
VLD1 (R11), [V30.S4, V31.S4]
- // load contants
+ // load constants
// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
WORD $0x4D60E940
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
index 8cf5d811..95679552 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
@@ -38,6 +38,9 @@ type chacha20poly1305 struct {
// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key.
func New(key []byte) (cipher.AEAD, error) {
+ if fips140Enforced() {
+ return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
+ }
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
index 50695a14..b850e772 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
@@ -56,7 +56,10 @@ func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []
ret, out := sliceForAppend(dst, len(plaintext)+16)
if alias.InexactOverlap(out, plaintext) {
- panic("chacha20poly1305: invalid buffer overlap")
+ panic("chacha20poly1305: invalid buffer overlap of output and input")
+ }
+ if alias.AnyOverlap(out, additionalData) {
+ panic("chacha20poly1305: invalid buffer overlap of output and additional data")
}
chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData)
return ret
@@ -73,7 +76,10 @@ func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) (
ciphertext = ciphertext[:len(ciphertext)-16]
ret, out := sliceForAppend(dst, len(ciphertext))
if alias.InexactOverlap(out, ciphertext) {
- panic("chacha20poly1305: invalid buffer overlap")
+ panic("chacha20poly1305: invalid buffer overlap of output and input")
+ }
+ if alias.AnyOverlap(out, additionalData) {
+ panic("chacha20poly1305: invalid buffer overlap of output and additional data")
}
if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) {
for i := range out {
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
index 6313898f..2ecc840f 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
@@ -31,7 +31,10 @@ func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []b
ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize)
ciphertext, tag := out[:len(plaintext)], out[len(plaintext):]
if alias.InexactOverlap(out, plaintext) {
- panic("chacha20poly1305: invalid buffer overlap")
+ panic("chacha20poly1305: invalid buffer overlap of output and input")
+ }
+ if alias.AnyOverlap(out, additionalData) {
+ panic("chacha20poly1305: invalid buffer overlap of output and additional data")
}
var polyKey [32]byte
@@ -67,7 +70,10 @@ func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []
ret, out := sliceForAppend(dst, len(ciphertext))
if alias.InexactOverlap(out, ciphertext) {
- panic("chacha20poly1305: invalid buffer overlap")
+ panic("chacha20poly1305: invalid buffer overlap of output and input")
+ }
+ if alias.AnyOverlap(out, additionalData) {
+ panic("chacha20poly1305: invalid buffer overlap of output and additional data")
}
if !p.Verify(tag) {
for i := range out {
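Reviewer note: the overlap panics are now split so callers can tell whether the output aliases the input or the additional data. A minimal sketch of the usual non-overlapping call pattern with this package, passing nil as dst so Seal and Open allocate fresh buffers; nonce handling is simplified for illustration.

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/chacha20poly1305"
    )

    func main() {
        key := make([]byte, chacha20poly1305.KeySize)
        nonce := make([]byte, chacha20poly1305.NonceSize)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }
        if _, err := rand.Read(nonce); err != nil {
            panic(err)
        }

        aead, err := chacha20poly1305.New(key)
        if err != nil {
            panic(err) // bad key length, or FIPS 140-only mode after this change
        }

        plaintext := []byte("hello")
        additionalData := []byte("header")

        // A nil dst makes Seal allocate a new buffer, so the output cannot
        // alias the plaintext or the additional data.
        ciphertext := aead.Seal(nil, nonce, plaintext, additionalData)

        decrypted, err := aead.Open(nil, nonce, ciphertext, additionalData)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s\n", decrypted)
    }
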
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go
new file mode 100644
index 00000000..9b9d5643
--- /dev/null
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go
@@ -0,0 +1,9 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package chacha20poly1305
+
+func fips140Enforced() bool { return false }
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go
new file mode 100644
index 00000000..f71089c4
--- /dev/null
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go
@@ -0,0 +1,11 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package chacha20poly1305
+
+import "crypto/fips140"
+
+func fips140Enforced() bool { return fips140.Enforced() }
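Reviewer note: the two new files use build tags so fips140Enforced() compiles to a stub before Go 1.26 and to crypto/fips140.Enforced() afterwards, which makes New and NewX fail cleanly in FIPS 140-only mode. A hedged caller-side sketch of one possible fallback to AES-256-GCM when the constructor refuses to run; the fallback is illustrative, not something the package prescribes.

    package main

    import (
        "crypto/aes"
        "crypto/cipher"
        "fmt"

        "golang.org/x/crypto/chacha20poly1305"
    )

    // newAEAD prefers ChaCha20-Poly1305 and falls back to AES-256-GCM when the
    // constructor returns an error (for example in FIPS 140-only mode).
    func newAEAD(key []byte) (cipher.AEAD, error) {
        if aead, err := chacha20poly1305.New(key); err == nil {
            return aead, nil
        }
        block, err := aes.NewCipher(key) // 32-byte key selects AES-256
        if err != nil {
            return nil, err
        }
        return cipher.NewGCM(block)
    }

    func main() {
        aead, err := newAEAD(make([]byte, 32))
        if err != nil {
            panic(err)
        }
        fmt.Println("nonce size:", aead.NonceSize())
    }
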
diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
index 1cebfe94..b4299b71 100644
--- a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
+++ b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
@@ -22,6 +22,9 @@ type xchacha20poly1305 struct {
// preferred when nonce uniqueness cannot be trivially ensured, or whenever
// nonces are randomly generated.
func NewX(key []byte) (cipher.AEAD, error) {
+ if fips140Enforced() {
+ return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
+ }
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
index 21ca3b2e..048faef3 100644
--- a/vendor/golang.org/x/crypto/curve25519/curve25519.go
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -3,11 +3,14 @@
// license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of the X25519 function, which
-// performs scalar multiplication on the elliptic curve known as Curve25519.
-// See RFC 7748.
+// performs scalar multiplication on the elliptic curve known as Curve25519
+// according to [RFC 7748].
//
-// This package is a wrapper for the X25519 implementation
-// in the crypto/ecdh package.
+// The curve25519 package is a wrapper for the X25519 implementation in the
+// crypto/ecdh package. It is [frozen] and is not accepting new features.
+//
+// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748
+// [frozen]: https://go.dev/wiki/Frozen
package curve25519
import "crypto/ecdh"
@@ -36,7 +39,7 @@ func ScalarBaseMult(dst, scalar *[32]byte) {
curve := ecdh.X25519()
priv, err := curve.NewPrivateKey(scalar[:])
if err != nil {
- panic("curve25519: internal error: scalarBaseMult was not 32 bytes")
+ panic("curve25519: " + err.Error())
}
copy(dst[:], priv.PublicKey().Bytes())
}
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
index 6a5b582a..7554ed57 100644
--- a/vendor/golang.org/x/crypto/ssh/cipher.go
+++ b/vendor/golang.org/x/crypto/ssh/cipher.go
@@ -8,6 +8,7 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
+ "crypto/fips140"
"crypto/rc4"
"crypto/subtle"
"encoding/binary"
@@ -15,6 +16,7 @@ import (
"fmt"
"hash"
"io"
+ "slices"
"golang.org/x/crypto/chacha20"
"golang.org/x/crypto/internal/poly1305"
@@ -93,41 +95,41 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream,
}
// cipherModes documents properties of supported ciphers. Ciphers not included
-// are not supported and will not be negotiated, even if explicitly requested in
-// ClientConfig.Crypto.Ciphers.
-var cipherModes = map[string]*cipherMode{
- // Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms
- // are defined in the order specified in the RFC.
- CipherAES128CTR: {16, aes.BlockSize, streamCipherMode(0, newAESCTR)},
- CipherAES192CTR: {24, aes.BlockSize, streamCipherMode(0, newAESCTR)},
- CipherAES256CTR: {32, aes.BlockSize, streamCipherMode(0, newAESCTR)},
-
- // Ciphers from RFC 4345, which introduces security-improved arcfour ciphers.
- // They are defined in the order specified in the RFC.
- InsecureCipherRC4128: {16, 0, streamCipherMode(1536, newRC4)},
- InsecureCipherRC4256: {32, 0, streamCipherMode(1536, newRC4)},
-
- // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
- // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
- // RC4) has problems with weak keys, and should be used with caution."
- // RFC 4345 introduces improved versions of Arcfour.
- InsecureCipherRC4: {16, 0, streamCipherMode(0, newRC4)},
-
- // AEAD ciphers
- CipherAES128GCM: {16, 12, newGCMCipher},
- CipherAES256GCM: {32, 12, newGCMCipher},
- CipherChaCha20Poly1305: {64, 0, newChaCha20Cipher},
-
+// are not supported and will not be negotiated, even if explicitly configured.
+// When FIPS mode is enabled, only FIPS-approved algorithms are included.
+var cipherModes = map[string]*cipherMode{}
+
+func init() {
+ cipherModes[CipherAES128CTR] = &cipherMode{16, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ cipherModes[CipherAES192CTR] = &cipherMode{24, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ cipherModes[CipherAES256CTR] = &cipherMode{32, aes.BlockSize, streamCipherMode(0, newAESCTR)}
+ // Use of GCM with arbitrary IVs is not allowed in FIPS 140-only mode,
+ // we'll wire it up to NewGCMForSSH in Go 1.26.
+ //
+ // For now it means we'll work with fips140=on but not fips140=only.
+ cipherModes[CipherAES128GCM] = &cipherMode{16, 12, newGCMCipher}
+ cipherModes[CipherAES256GCM] = &cipherMode{32, 12, newGCMCipher}
+
+ if fips140.Enabled() {
+ defaultCiphers = slices.DeleteFunc(defaultCiphers, func(algo string) bool {
+ _, ok := cipherModes[algo]
+ return !ok
+ })
+ return
+ }
+
+ cipherModes[CipherChaCha20Poly1305] = &cipherMode{64, 0, newChaCha20Cipher}
+ // Insecure ciphers not included in the default configuration.
+ cipherModes[InsecureCipherRC4128] = &cipherMode{16, 0, streamCipherMode(1536, newRC4)}
+ cipherModes[InsecureCipherRC4256] = &cipherMode{32, 0, streamCipherMode(1536, newRC4)}
+ cipherModes[InsecureCipherRC4] = &cipherMode{16, 0, streamCipherMode(0, newRC4)}
// CBC mode is insecure and so is not included in the default config.
// (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
- InsecureCipherAES128CBC: {16, aes.BlockSize, newAESCBCCipher},
-
- // 3des-cbc is insecure and is not included in the default
- // config.
- InsecureCipherTripleDESCBC: {24, des.BlockSize, newTripleDESCBCCipher},
+ cipherModes[InsecureCipherAES128CBC] = &cipherMode{16, aes.BlockSize, newAESCBCCipher}
+ cipherModes[InsecureCipherTripleDESCBC] = &cipherMode{24, des.BlockSize, newTripleDESCBCCipher}
}
// prefixLen is the length of the packet prefix that contains the packet length
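Reviewer note: cipherModes is now populated in init() and, under FIPS mode, the default cipher list is pruned to the registered FIPS-approved entries; anything not in the map is never negotiated even if requested. A small sketch of pinning ciphers explicitly on the client side, using the exported cipher name constants referenced above; the address and credentials are placeholders.

    package main

    import (
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        config := &ssh.ClientConfig{
            User: "demo",
            Auth: []ssh.AuthMethod{ssh.Password("secret")},
            // Restrict negotiation to AEAD and CTR ciphers; names missing from
            // cipherModes (e.g. RC4 under FIPS) are silently never negotiated.
            Config: ssh.Config{
                Ciphers: []string{ssh.CipherAES128GCM, ssh.CipherAES256GCM, ssh.CipherAES256CTR},
            },
            HostKeyCallback: ssh.InsecureIgnoreHostKey(), // example only; verify host keys in production
        }

        client, err := ssh.Dial("tcp", "example.com:22", config)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()
    }
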
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
index c12818fd..3127e499 100644
--- a/vendor/golang.org/x/crypto/ssh/client_auth.go
+++ b/vendor/golang.org/x/crypto/ssh/client_auth.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
+ "slices"
"strings"
)
@@ -83,7 +84,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success
return nil
} else if ok == authFailure {
- if m := auth.method(); !contains(tried, m) {
+ if m := auth.method(); !slices.Contains(tried, m) {
tried = append(tried, m)
}
}
@@ -97,7 +98,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
- if contains(tried, candidateMethod) {
+ if slices.Contains(tried, candidateMethod) {
continue
}
for _, meth := range methods {
@@ -117,15 +118,6 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
}
-func contains(list []string, e string) bool {
- for _, s := range list {
- if s == e {
- return true
- }
- }
- return false
-}
-
// An AuthMethod represents an instance of an RFC 4252 authentication method.
type AuthMethod interface {
// auth authenticates user over transport t.
@@ -255,7 +247,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Fallback to use if there is no "server-sig-algs" extension or a
// common algorithm cannot be found. We use the public key format if the
// MultiAlgorithmSigner supports it, otherwise we return an error.
- if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
+ if !slices.Contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v",
underlyingAlgo(keyFormat), keyFormat, as.Algorithms())
}
@@ -284,7 +276,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Filter algorithms based on those supported by MultiAlgorithmSigner.
var keyAlgos []string
for _, algo := range algorithmsForKeyFormat(keyFormat) {
- if contains(as.Algorithms(), underlyingAlgo(algo)) {
+ if slices.Contains(as.Algorithms(), underlyingAlgo(algo)) {
keyAlgos = append(keyAlgos, algo)
}
}
@@ -334,7 +326,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// the key try to use the obtained algorithm as if "server-sig-algs" had
// not been implemented if supported from the algorithm signer.
if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 {
- if contains(as.Algorithms(), KeyAlgoRSA) {
+ if slices.Contains(as.Algorithms(), KeyAlgoRSA) {
// We retry using the compat algorithm after all signers have
// been tried normally.
signers = append(signers, &multiAlgorithmSigner{
@@ -385,7 +377,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required.
- if success == authSuccess || !contains(methods, cb.method()) {
+ if success == authSuccess || !slices.Contains(methods, cb.method()) {
return success, methods, err
}
}
@@ -434,7 +426,7 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
// servers send the key type instead. OpenSSH allows any algorithm
// that matches the public key, so we do the same.
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709
- if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
+ if !slices.Contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
return false, nil
}
if !bytes.Equal(msg.PubKey, pubKey) {
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
index f2ec0896..2e44e9c9 100644
--- a/vendor/golang.org/x/crypto/ssh/common.go
+++ b/vendor/golang.org/x/crypto/ssh/common.go
@@ -6,6 +6,7 @@ package ssh
import (
"crypto"
+ "crypto/fips140"
"crypto/rand"
"fmt"
"io"
@@ -83,6 +84,7 @@ var (
// supportedKexAlgos specifies key-exchange algorithms implemented by this
// package in preference order, excluding those with security issues.
supportedKexAlgos = []string{
+ KeyExchangeMLKEM768X25519,
KeyExchangeCurve25519,
KeyExchangeECDHP256,
KeyExchangeECDHP384,
@@ -94,6 +96,7 @@ var (
// defaultKexAlgos specifies the default preference for key-exchange
// algorithms in preference order.
defaultKexAlgos = []string{
+ KeyExchangeMLKEM768X25519,
KeyExchangeCurve25519,
KeyExchangeECDHP256,
KeyExchangeECDHP384,
@@ -254,6 +257,40 @@ type Algorithms struct {
PublicKeyAuths []string
}
+func init() {
+ if fips140.Enabled() {
+ defaultHostKeyAlgos = slices.DeleteFunc(defaultHostKeyAlgos, func(algo string) bool {
+ _, err := hashFunc(underlyingAlgo(algo))
+ return err != nil
+ })
+ defaultPubKeyAuthAlgos = slices.DeleteFunc(defaultPubKeyAuthAlgos, func(algo string) bool {
+ _, err := hashFunc(underlyingAlgo(algo))
+ return err != nil
+ })
+ }
+}
+
+func hashFunc(format string) (crypto.Hash, error) {
+ switch format {
+ case KeyAlgoRSASHA256, KeyAlgoECDSA256, KeyAlgoSKED25519, KeyAlgoSKECDSA256:
+ return crypto.SHA256, nil
+ case KeyAlgoECDSA384:
+ return crypto.SHA384, nil
+ case KeyAlgoRSASHA512, KeyAlgoECDSA521:
+ return crypto.SHA512, nil
+ case KeyAlgoED25519:
+ // KeyAlgoED25519 doesn't pre-hash.
+ return 0, nil
+ case KeyAlgoRSA, InsecureKeyAlgoDSA:
+ if fips140.Enabled() {
+ return 0, fmt.Errorf("ssh: hash algorithm for format %q not allowed in FIPS 140 mode", format)
+ }
+ return crypto.SHA1, nil
+ default:
+ return 0, fmt.Errorf("ssh: hash algorithm for format %q not mapped", format)
+ }
+}
+
// SupportedAlgorithms returns algorithms currently implemented by this package,
// excluding those with security issues, which are returned by
// InsecureAlgorithms. The algorithms listed here are in preference order.
@@ -281,21 +318,6 @@ func InsecureAlgorithms() Algorithms {
var supportedCompressions = []string{compressionNone}
-// hashFuncs keeps the mapping of supported signature algorithms to their
-// respective hashes needed for signing and verification.
-var hashFuncs = map[string]crypto.Hash{
- KeyAlgoRSA: crypto.SHA1,
- KeyAlgoRSASHA256: crypto.SHA256,
- KeyAlgoRSASHA512: crypto.SHA512,
- InsecureKeyAlgoDSA: crypto.SHA1,
- KeyAlgoECDSA256: crypto.SHA256,
- KeyAlgoECDSA384: crypto.SHA384,
- KeyAlgoECDSA521: crypto.SHA512,
- // KeyAlgoED25519 doesn't pre-hash.
- KeyAlgoSKECDSA256: crypto.SHA256,
- KeyAlgoSKED25519: crypto.SHA256,
-}
-
// algorithmsForKeyFormat returns the supported signature algorithms for a given
// public key format (PublicKey.Type), in order of preference. See RFC 8332,
// Section 2. See also the note in sendKexInit on backwards compatibility.
@@ -310,11 +332,40 @@ func algorithmsForKeyFormat(keyFormat string) []string {
}
}
+// keyFormatForAlgorithm returns the key format corresponding to the given
+// signature algorithm. It returns an empty string if the signature algorithm is
+// invalid or unsupported.
+func keyFormatForAlgorithm(sigAlgo string) string {
+ switch sigAlgo {
+ case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512:
+ return KeyAlgoRSA
+ case CertAlgoRSAv01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01:
+ return CertAlgoRSAv01
+ case KeyAlgoED25519,
+ KeyAlgoSKED25519,
+ KeyAlgoSKECDSA256,
+ KeyAlgoECDSA256,
+ KeyAlgoECDSA384,
+ KeyAlgoECDSA521,
+ InsecureKeyAlgoDSA,
+ InsecureCertAlgoDSAv01,
+ CertAlgoECDSA256v01,
+ CertAlgoECDSA384v01,
+ CertAlgoECDSA521v01,
+ CertAlgoSKECDSA256v01,
+ CertAlgoED25519v01,
+ CertAlgoSKED25519v01:
+ return sigAlgo
+ default:
+ return ""
+ }
+}
+
// isRSA returns whether algo is a supported RSA algorithm, including certificate
// algorithms.
func isRSA(algo string) bool {
algos := algorithmsForKeyFormat(KeyAlgoRSA)
- return contains(algos, underlyingAlgo(algo))
+ return slices.Contains(algos, underlyingAlgo(algo))
}
func isRSACert(algo string) bool {
@@ -513,7 +564,7 @@ func (c *Config) SetDefaults() {
if kexAlgoMap[k] != nil {
// Ignore the KEX if we have no kexAlgoMap definition.
kexs = append(kexs, k)
- if k == KeyExchangeCurve25519 && !contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
+ if k == KeyExchangeCurve25519 && !slices.Contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
kexs = append(kexs, keyExchangeCurve25519LibSSH)
}
}
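Reviewer note: the new init() trims the default host key and public key auth algorithm lists under FIPS, and mlkem768x25519-sha256 now leads the supported and default KEX lists. A small sketch that prints what the package reports at runtime; it assumes the Algorithms struct also exposes KeyExchanges and Ciphers fields (only PublicKeyAuths is visible in this hunk).

    package main

    import (
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        algos := ssh.SupportedAlgorithms()
        fmt.Println("key exchanges:", algos.KeyExchanges) // mlkem768x25519-sha256 listed first
        fmt.Println("public key auth:", algos.PublicKeyAuths)

        insecure := ssh.InsecureAlgorithms()
        fmt.Println("insecure ciphers (never in defaults):", insecure.Ciphers)
    }
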
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
index 04ccce34..5b4de9ef 100644
--- a/vendor/golang.org/x/crypto/ssh/doc.go
+++ b/vendor/golang.org/x/crypto/ssh/doc.go
@@ -17,8 +17,18 @@ References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01
+ [FIPS 140-3 mode]: https://go.dev/doc/security/fips140
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.
+
+# FIPS 140-3 mode
+
+When the program is in [FIPS 140-3 mode], this package behaves as if only SP
+800-140C and SP 800-140D approved cipher suites, signature algorithms,
+certificate public key types and sizes, and key exchange and derivation
+algorithms were implemented. Others are silently ignored and not negotiated, or
+rejected. This set may depend on the algorithms supported by the FIPS 140-3 Go
+Cryptographic Module selected with GOFIPS140, and may change across Go versions.
*/
package ssh
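Reviewer note: the doc.go addition documents behavior under FIPS 140-3 mode. For reference, a minimal way to observe that mode from a program on Go 1.24+; GOFIPS140 selects the cryptographic module at build time and GODEBUG=fips140=on enables it at run time.

    package main

    import (
        "crypto/fips140"
        "fmt"
    )

    func main() {
        // Build with GOFIPS140=v1.0.0 (or run with GODEBUG=fips140=on) to enable
        // the FIPS 140-3 module; the ssh package then trims its default cipher,
        // MAC, key exchange, and host key algorithm lists accordingly.
        fmt.Println("FIPS 140-3 mode enabled:", fips140.Enabled())
    }
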
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index a90bfe33..4be3cbb6 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -10,6 +10,7 @@ import (
"io"
"log"
"net"
+ "slices"
"strings"
"sync"
)
@@ -527,7 +528,7 @@ func (t *handshakeTransport) sendKexInit() error {
switch s := k.(type) {
case MultiAlgorithmSigner:
for _, algo := range algorithmsForKeyFormat(keyFormat) {
- if contains(s.Algorithms(), underlyingAlgo(algo)) {
+ if slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
}
}
@@ -679,7 +680,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return err
}
- if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) {
+ if t.sessionID == nil && ((isClient && slices.Contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && slices.Contains(clientInit.KexAlgos, kexStrictClient))) {
t.strictMode = true
if err := t.conn.setStrictMode(); err != nil {
return err
@@ -736,7 +737,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
- if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
+ if !isClient && firstKeyExchange && slices.Contains(clientInit.KexAlgos, "ext-info-c") {
supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",")
extInfo := &extInfoMsg{
NumExtensions: 2,
@@ -790,7 +791,7 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys {
if s, ok := k.(MultiAlgorithmSigner); ok {
- if !contains(s.Algorithms(), underlyingAlgo(algo)) {
+ if !slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
continue
}
}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
index cf388a92..5f7fdd85 100644
--- a/vendor/golang.org/x/crypto/ssh/kex.go
+++ b/vendor/golang.org/x/crypto/ssh/kex.go
@@ -8,13 +8,14 @@ import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
+ "crypto/fips140"
"crypto/rand"
- "crypto/subtle"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
+ "slices"
"golang.org/x/crypto/curve25519"
)
@@ -396,9 +397,27 @@ func ecHash(curve elliptic.Curve) crypto.Hash {
return crypto.SHA512
}
+// kexAlgoMap defines the supported KEXs. KEXs not included are not supported
+// and will not be negotiated, even if explicitly configured. When FIPS mode is
+// enabled, only FIPS-approved algorithms are included.
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
+ // mlkem768x25519-sha256 we'll work with fips140=on but not fips140=only
+ // until Go 1.26.
+ kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
+ kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
+ kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
+ kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
+
+ if fips140.Enabled() {
+ defaultKexAlgos = slices.DeleteFunc(defaultKexAlgos, func(algo string) bool {
+ _, ok := kexAlgoMap[algo]
+ return !ok
+ })
+ return
+ }
+
p, _ := new(big.Int).SetString(oakleyGroup2, 16)
kexAlgoMap[InsecureKeyExchangeDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
@@ -432,9 +451,6 @@ func init() {
hashFunc: crypto.SHA512,
}
- kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
- kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
- kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
kexAlgoMap[KeyExchangeCurve25519] = &curve25519sha256{}
kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{}
kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
@@ -454,15 +470,17 @@ func (kp *curve25519KeyPair) generate(rand io.Reader) error {
if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
return err
}
- curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
+ p, err := curve25519.X25519(kp.priv[:], curve25519.Basepoint)
+ if err != nil {
+ return fmt.Errorf("curve25519: %w", err)
+ }
+ if len(p) != 32 {
+ return fmt.Errorf("curve25519: internal error: X25519 returned %d bytes, expected 32", len(p))
+ }
+ copy(kp.pub[:], p)
return nil
}
-// curve25519Zeros is just an array of 32 zero bytes so that we have something
-// convenient to compare against in order to reject curve25519 points with the
-// wrong order.
-var curve25519Zeros [32]byte
-
func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
var kp curve25519KeyPair
if err := kp.generate(rand); err != nil {
@@ -485,11 +503,9 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh
return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
}
- var servPub, secret [32]byte
- copy(servPub[:], reply.EphemeralPubKey)
- curve25519.ScalarMult(&secret, &kp.priv, &servPub)
- if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+ secret, err := curve25519.X25519(kp.priv[:], reply.EphemeralPubKey)
+ if err != nil {
+ return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err)
}
h := crypto.SHA256.New()
@@ -531,11 +547,9 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
return nil, err
}
- var clientPub, secret [32]byte
- copy(clientPub[:], kexInit.ClientPubKey)
- curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
- if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
- return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+ secret, err := curve25519.X25519(kp.priv[:], kexInit.ClientPubKey)
+ if err != nil {
+ return nil, fmt.Errorf("ssh: peer's curve25519 public value is not valid: %w", err)
}
hostKeyBytes := priv.PublicKey().Marshal()
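Reviewer note: the key exchange now calls curve25519.X25519, which returns an error for low-order peer points, replacing ScalarMult plus the all-zero shared-secret comparison. A minimal sketch of the same API used for key generation and shared-secret derivation; the peer public key is a placeholder.

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/curve25519"
    )

    func main() {
        priv := make([]byte, curve25519.ScalarSize)
        if _, err := rand.Read(priv); err != nil {
            panic(err)
        }

        // Public key: scalar multiplication with the base point.
        pub, err := curve25519.X25519(priv, curve25519.Basepoint)
        if err != nil {
            panic(err)
        }

        // Shared secret with a peer public key; X25519 rejects low-order points
        // with an error, replacing the old all-zero output check.
        peerPub := pub // placeholder: a real peer's public key goes here
        secret, err := curve25519.X25519(priv, peerPub)
        if err != nil {
            panic(err)
        }
        fmt.Printf("shared secret prefix: %x\n", secret[:8])
    }
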
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index a28c0de5..47a07539 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -27,6 +27,7 @@ import (
"fmt"
"io"
"math/big"
+ "slices"
"strings"
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
@@ -89,6 +90,11 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
}
return cert, nil, nil
}
+ if keyFormat := keyFormatForAlgorithm(algo); keyFormat != "" {
+ return nil, nil, fmt.Errorf("ssh: signature algorithm %q isn't a key format; key is malformed and should be re-encoded with type %q",
+ algo, keyFormat)
+ }
+
return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
}
@@ -191,9 +197,10 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey
return "", nil, nil, "", nil, io.EOF
}
-// ParseAuthorizedKey parses a public key from an authorized_keys
-// file used in OpenSSH according to the sshd(8) manual page.
+// ParseAuthorizedKey parses a public key from an authorized_keys file used in
+// OpenSSH according to the sshd(8) manual page. Invalid lines are ignored.
func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
+ var lastErr error
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
@@ -222,6 +229,8 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
return out, comment, options, rest, nil
+ } else {
+ lastErr = err
}
// No key type recognised. Maybe there's an options field at
@@ -264,12 +273,18 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
options = candidateOptions
return out, comment, options, rest, nil
+ } else {
+ lastErr = err
}
in = rest
continue
}
+ if lastErr != nil {
+ return nil, "", nil, nil, fmt.Errorf("ssh: no key found; last parsing error for ignored line: %w", lastErr)
+ }
+
return nil, "", nil, nil, errors.New("ssh: no key found")
}
@@ -395,11 +410,11 @@ func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (Multi
}
for _, algo := range algorithms {
- if !contains(supportedAlgos, algo) {
+ if !slices.Contains(supportedAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q",
algo, signer.PublicKey().Type())
}
- if !contains(signerAlgos, algo) {
+ if !slices.Contains(signerAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo)
}
}
@@ -486,10 +501,13 @@ func (r *rsaPublicKey) Marshal() []byte {
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
supportedAlgos := algorithmsForKeyFormat(r.Type())
- if !contains(supportedAlgos, sig.Format) {
+ if !slices.Contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
- hash := hashFuncs[sig.Format]
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -606,7 +624,11 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -651,7 +673,11 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
- h := hashFuncs[k.PublicKey().Type()].New()
+ hash, err := hashFunc(k.PublicKey().Type())
+ if err != nil {
+ return nil, err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@@ -801,8 +827,11 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
-
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@@ -905,8 +934,11 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
-
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -1009,7 +1041,11 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
- h := hashFuncs[sig.Format].New()
+ hash, err := hashFunc(sig.Format)
+ if err != nil {
+ return err
+ }
+ h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@@ -1112,11 +1148,14 @@ func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
algorithm = s.pubKey.Type()
}
- if !contains(s.Algorithms(), algorithm) {
+ if !slices.Contains(s.Algorithms(), algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
}
- hashFunc := hashFuncs[algorithm]
+ hashFunc, err := hashFunc(algorithm)
+ if err != nil {
+ return nil, err
+ }
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
@@ -1451,6 +1490,7 @@ type openSSHEncryptedPrivateKey struct {
NumKeys uint32
PubKey []byte
PrivKeyBlock []byte
+ Rest []byte `ssh:"rest"`
}
type openSSHPrivateKey struct {
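Reviewer note: ParseAuthorizedKey now remembers the last parse error from ignored lines and wraps it when no valid key is found. A self-contained sketch that round-trips a generated key through MarshalAuthorizedKey and parses it back past an invalid leading line.

    package main

    import (
        "crypto/ed25519"
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        // Build a valid authorized_keys line from a freshly generated key.
        pub, _, err := ed25519.GenerateKey(nil)
        if err != nil {
            panic(err)
        }
        sshPub, err := ssh.NewPublicKey(pub)
        if err != nil {
            panic(err)
        }
        line := ssh.MarshalAuthorizedKey(sshPub)

        // Prepend an invalid line: it is ignored, but if no valid key followed,
        // the returned error would now wrap the last parse failure.
        data := append([]byte("not-a-key\n"), line...)

        key, comment, options, _, err := ssh.ParseAuthorizedKey(data)
        if err != nil {
            panic(err)
        }
        fmt.Println(key.Type(), comment, options)
    }
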
diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go
index de2639d5..87d626fb 100644
--- a/vendor/golang.org/x/crypto/ssh/mac.go
+++ b/vendor/golang.org/x/crypto/ssh/mac.go
@@ -7,11 +7,13 @@ package ssh
// Message authentication support
import (
+ "crypto/fips140"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"hash"
+ "slices"
)
type macMode struct {
@@ -46,23 +48,37 @@ func (t truncatingMAC) Size() int {
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
-var macModes = map[string]*macMode{
- HMACSHA512ETM: {64, true, func(key []byte) hash.Hash {
+// macModes defines the supported MACs. MACs not included are not supported
+// and will not be negotiated, even if explicitly configured. When FIPS mode is
+// enabled, only FIPS-approved algorithms are included.
+var macModes = map[string]*macMode{}
+
+func init() {
+ macModes[HMACSHA512ETM] = &macMode{64, true, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
- }},
- HMACSHA256ETM: {32, true, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA256ETM] = &macMode{32, true, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
- }},
- HMACSHA512: {64, false, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA512] = &macMode{64, false, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
- }},
- HMACSHA256: {32, false, func(key []byte) hash.Hash {
+ }}
+ macModes[HMACSHA256] = &macMode{32, false, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
- }},
- HMACSHA1: {20, false, func(key []byte) hash.Hash {
+ }}
+
+ if fips140.Enabled() {
+ defaultMACs = slices.DeleteFunc(defaultMACs, func(algo string) bool {
+ _, ok := macModes[algo]
+ return !ok
+ })
+ return
+ }
+
+ macModes[HMACSHA1] = &macMode{20, false, func(key []byte) hash.Hash {
return hmac.New(sha1.New, key)
- }},
- InsecureHMACSHA196: {20, false, func(key []byte) hash.Hash {
+ }}
+ macModes[InsecureHMACSHA196] = &macMode{20, false, func(key []byte) hash.Hash {
return truncatingMAC{12, hmac.New(sha1.New, key)}
- }},
+ }}
}
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
index 251b9d06..ab22c3d3 100644
--- a/vendor/golang.org/x/crypto/ssh/messages.go
+++ b/vendor/golang.org/x/crypto/ssh/messages.go
@@ -792,7 +792,7 @@ func marshalString(to []byte, s []byte) []byte {
return to[len(s):]
}
-var bigIntType = reflect.TypeOf((*big.Int)(nil))
+var bigIntType = reflect.TypeFor[*big.Int]()
// Decode a packet into its corresponding message.
func decode(packet []byte) (interface{}, error) {
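Reviewer note: bigIntType switches to the generic reflect.TypeFor helper (Go 1.22+), which yields the same reflect.Type as the old typed-nil TypeOf idiom without constructing a value:

    package main

    import (
        "fmt"
        "math/big"
        "reflect"
    )

    func main() {
        legacy := reflect.TypeOf((*big.Int)(nil)) // *big.Int via a typed nil
        current := reflect.TypeFor[*big.Int]()    // same type, no value needed
        fmt.Println(legacy == current, current)   // true *big.Int
    }
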
diff --git a/vendor/golang.org/x/crypto/ssh/mlkem.go b/vendor/golang.org/x/crypto/ssh/mlkem.go
index 657e1079..ddc0ed1f 100644
--- a/vendor/golang.org/x/crypto/ssh/mlkem.go
+++ b/vendor/golang.org/x/crypto/ssh/mlkem.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.24
-
package ssh
import (
@@ -13,23 +11,10 @@ import (
"errors"
"fmt"
"io"
- "runtime"
- "slices"
"golang.org/x/crypto/curve25519"
)
-func init() {
- // After Go 1.24rc1 mlkem swapped the order of return values of Encapsulate.
- // See #70950.
- if runtime.Version() == "go1.24rc1" {
- return
- }
- supportedKexAlgos = slices.Insert(supportedKexAlgos, 0, KeyExchangeMLKEM768X25519)
- defaultKexAlgos = slices.Insert(defaultKexAlgos, 0, KeyExchangeMLKEM768X25519)
- kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
-}
-
// mlkem768WithCurve25519sha256 implements the hybrid ML-KEM768 with
// curve25519-sha256 key exchange method, as described by
// draft-kampanakis-curdle-ssh-pq-ke-05 section 2.3.3.
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 98679ba5..064dcbaf 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -10,6 +10,7 @@ import (
"fmt"
"io"
"net"
+ "slices"
"strings"
)
@@ -43,6 +44,9 @@ type Permissions struct {
// pass data from the authentication callbacks to the server
// application layer.
Extensions map[string]string
+
+ // ExtraData allows to store user defined data.
+ ExtraData map[any]any
}
type GSSAPIWithMICConfig struct {
@@ -126,6 +130,21 @@ type ServerConfig struct {
// Permissions.Extensions entry.
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+ // VerifiedPublicKeyCallback, if non-nil, is called after a client
+ // successfully confirms having control over a key that was previously
+ // approved by PublicKeyCallback. The permissions object passed to the
+ // callback is the one returned by PublicKeyCallback for the given public
+ // key and its ownership is transferred to the callback. The returned
+ // Permissions object can be the same object, optionally modified, or a
+ // completely new object. If VerifiedPublicKeyCallback is non-nil,
+ // PublicKeyCallback is not allowed to return a PartialSuccessError, which
+ // can instead be returned by VerifiedPublicKeyCallback.
+ //
+ // VerifiedPublicKeyCallback does not affect which authentication methods
+ // are included in the list of methods that can be attempted by the client.
+ VerifiedPublicKeyCallback func(conn ConnMetadata, key PublicKey, permissions *Permissions,
+ signatureAlgorithm string) (*Permissions, error)
+
// KeyboardInteractiveCallback, if non-nil, is called when
// keyboard-interactive authentication is selected (RFC
// 4256). The client object's Challenge function should be
@@ -246,7 +265,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
fullConf.PublicKeyAuthAlgorithms = defaultPubKeyAuthAlgos
} else {
for _, algo := range fullConf.PublicKeyAuthAlgorithms {
- if !contains(SupportedAlgorithms().PublicKeyAuths, algo) && !contains(InsecureAlgorithms().PublicKeyAuths, algo) {
+ if !slices.Contains(SupportedAlgorithms().PublicKeyAuths, algo) && !slices.Contains(InsecureAlgorithms().PublicKeyAuths, algo) {
c.Close()
return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo)
}
@@ -631,7 +650,7 @@ userAuthLoop:
return nil, parseError(msgUserAuthRequest)
}
algo := string(algoBytes)
- if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
+ if !slices.Contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break
}
@@ -652,6 +671,9 @@ userAuthLoop:
candidate.pubKeyData = pubKeyData
candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
+ if isPartialSuccessError && config.VerifiedPublicKeyCallback != nil {
+ return nil, errors.New("ssh: invalid library usage: PublicKeyCallback must not return partial success when VerifiedPublicKeyCallback is defined")
+ }
if (candidate.result == nil || isPartialSuccessError) &&
candidate.perms != nil &&
@@ -695,7 +717,7 @@ userAuthLoop:
// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
// key type. The algorithm and public key type must be
// consistent: both must be certificate algorithms, or neither.
- if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
+ if !slices.Contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q",
pubKey.Type(), algo)
break
@@ -705,7 +727,7 @@ userAuthLoop:
// algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but
// for certs, the names differ.
- if !contains(config.PublicKeyAuthAlgorithms, sig.Format) {
+ if !slices.Contains(config.PublicKeyAuthAlgorithms, sig.Format) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
@@ -722,6 +744,12 @@ userAuthLoop:
authErr = candidate.result
perms = candidate.perms
+ if authErr == nil && config.VerifiedPublicKeyCallback != nil {
+ // Only call VerifiedPublicKeyCallback after the key has been accepted
+ // and successfully verified. If authErr is non-nil, the key is not
+ // considered verified and the callback must not run.
+ perms, authErr = config.VerifiedPublicKeyCallback(s, pubKey, perms, algo)
+ }
}
case "gssapi-with-mic":
if authConfig.GSSAPIWithMICConfig == nil {
diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go
index 24bd7c8e..a6249a12 100644
--- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go
+++ b/vendor/golang.org/x/crypto/ssh/ssh_gss.go
@@ -106,6 +106,13 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) {
if !ok {
return nil, errors.New("parse uint32 failed")
}
+ // Each ASN.1 encoded OID must have a minimum
+ // of 2 bytes; 64 maximum mechanisms is an
+ // arbitrary, but reasonable ceiling.
+ const maxMechs = 64
+ if n > maxMechs || int(n)*2 > len(rest) {
+ return nil, errors.New("invalid mechanism count")
+ }
s := &userAuthRequestGSSAPI{
N: n,
OIDS: make([]asn1.ObjectIdentifier, n),
@@ -122,7 +129,6 @@ func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) {
if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil {
return nil, err
}
-
}
return s, nil
}
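The guard added above is the usual defence against attacker-controlled length fields: validate the declared count against what the remaining bytes could plausibly hold before calling make. A stand-alone illustration of the same pattern, with invented names, not the library's internals:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// parseOIDCount reads a uint32 count from an untrusted payload and refuses
// values that could not possibly be backed by the remaining data. Each
// ASN.1-encoded OID needs at least 2 bytes, so len(rest) bytes can hold at
// most len(rest)/2 entries.
func parseOIDCount(payload []byte) (uint32, []byte, error) {
	if len(payload) < 4 {
		return 0, nil, errors.New("short payload")
	}
	n := binary.BigEndian.Uint32(payload)
	rest := payload[4:]
	const maxMechs = 64 // arbitrary but reasonable ceiling
	if n > maxMechs || int(n)*2 > len(rest) {
		return 0, nil, errors.New("invalid mechanism count")
	}
	return n, rest, nil
}

func main() {
	// A 4 GiB count with no backing data is rejected instead of reaching a
	// huge make([]..., n) allocation.
	_, _, err := parseOIDCount([]byte{0xff, 0xff, 0xff, 0xff})
	fmt.Println(err)
}
```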
diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go
index b171b330..152470fc 100644
--- a/vendor/golang.org/x/crypto/ssh/streamlocal.go
+++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go
@@ -44,7 +44,7 @@ func (c *Client) ListenUnix(socketPath string) (net.Listener, error) {
if !ok {
return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer")
}
- ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"})
+ ch := c.forwards.add("unix", socketPath)
return &unixListener{socketPath, c, ch}, nil
}
@@ -96,7 +96,7 @@ func (l *unixListener) Accept() (net.Conn, error) {
// Close closes the listener.
func (l *unixListener) Close() error {
// this also closes the listener.
- l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"})
+ l.conn.forwards.remove("unix", l.socketPath)
m := streamLocalChannelForwardMsg{
l.socketPath,
}
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go
index 93d844f0..78c41fe5 100644
--- a/vendor/golang.org/x/crypto/ssh/tcpip.go
+++ b/vendor/golang.org/x/crypto/ssh/tcpip.go
@@ -11,6 +11,7 @@ import (
"io"
"math/rand"
"net"
+ "net/netip"
"strconv"
"strings"
"sync"
@@ -22,14 +23,21 @@ import (
// the returned net.Listener. The listener must be serviced, or the
// SSH connection may hang.
// N must be "tcp", "tcp4", "tcp6", or "unix".
+//
+// If the address is a hostname, it is sent to the remote peer as-is, without
+// being resolved locally, and the Listener Addr method will return a zero IP.
func (c *Client) Listen(n, addr string) (net.Listener, error) {
switch n {
case "tcp", "tcp4", "tcp6":
- laddr, err := net.ResolveTCPAddr(n, addr)
+ host, portStr, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.ParseInt(portStr, 10, 32)
if err != nil {
return nil, err
}
- return c.ListenTCP(laddr)
+ return c.listenTCPInternal(host, int(port))
case "unix":
return c.ListenUnix(addr)
default:
@@ -102,15 +110,24 @@ func (c *Client) handleForwards() {
// ListenTCP requests the remote peer open a listening socket
// on laddr. Incoming connections will be available by calling
// Accept on the returned net.Listener.
+//
+// ListenTCP accepts an IP address, to provide a hostname use [Client.Listen]
+// with "tcp", "tcp4", or "tcp6" network instead.
func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
c.handleForwardsOnce.Do(c.handleForwards)
if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
return c.autoPortListenWorkaround(laddr)
}
+ return c.listenTCPInternal(laddr.IP.String(), laddr.Port)
+}
+
+func (c *Client) listenTCPInternal(host string, port int) (net.Listener, error) {
+ c.handleForwardsOnce.Do(c.handleForwards)
+
m := channelForwardMsg{
- laddr.IP.String(),
- uint32(laddr.Port),
+ host,
+ uint32(port),
}
// send message
ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
@@ -123,20 +140,33 @@ func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
// If the original port was 0, then the remote side will
// supply a real port number in the response.
- if laddr.Port == 0 {
+ if port == 0 {
var p struct {
Port uint32
}
if err := Unmarshal(resp, &p); err != nil {
return nil, err
}
- laddr.Port = int(p.Port)
+ port = int(p.Port)
}
+ // Construct a local address placeholder for the remote listener. If the
+ // original host is an IP address, preserve it so that Listener.Addr()
+ // reports the same IP. If the host is a hostname or cannot be parsed as an
+ // IP, fall back to IPv4zero. The port field is always set, even if the
+ // original port was 0, because in that case the remote server will assign
+ // one, allowing callers to determine which port was selected.
+ ip := net.IPv4zero
+ if parsed, err := netip.ParseAddr(host); err == nil {
+ ip = net.IP(parsed.AsSlice())
+ }
+ laddr := &net.TCPAddr{
+ IP: ip,
+ Port: port,
+ }
+ addr := net.JoinHostPort(host, strconv.FormatInt(int64(port), 10))
+ ch := c.forwards.add("tcp", addr)
- // Register this forward, using the port number we obtained.
- ch := c.forwards.add(laddr)
-
- return &tcpListener{laddr, c, ch}, nil
+ return &tcpListener{laddr, addr, c, ch}, nil
}
// forwardList stores a mapping between remote
@@ -149,8 +179,9 @@ type forwardList struct {
// forwardEntry represents an established mapping of a laddr on a
// remote ssh server to a channel connected to a tcpListener.
type forwardEntry struct {
- laddr net.Addr
- c chan forward
+ addr string // host:port or socket path
+ network string // tcp or unix
+ c chan forward
}
// forward represents an incoming forwarded tcpip connection. The
@@ -161,12 +192,13 @@ type forward struct {
raddr net.Addr // the raddr of the incoming connection
}
-func (l *forwardList) add(addr net.Addr) chan forward {
+func (l *forwardList) add(n, addr string) chan forward {
l.Lock()
defer l.Unlock()
f := forwardEntry{
- laddr: addr,
- c: make(chan forward, 1),
+ addr: addr,
+ network: n,
+ c: make(chan forward, 1),
}
l.entries = append(l.entries, f)
return f.c
@@ -185,19 +217,20 @@ func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
if port == 0 || port > 65535 {
return nil, fmt.Errorf("ssh: port number out of range: %d", port)
}
- ip := net.ParseIP(string(addr))
- if ip == nil {
+ ip, err := netip.ParseAddr(addr)
+ if err != nil {
return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
}
- return &net.TCPAddr{IP: ip, Port: int(port)}, nil
+ return &net.TCPAddr{IP: net.IP(ip.AsSlice()), Port: int(port)}, nil
}
func (l *forwardList) handleChannels(in <-chan NewChannel) {
for ch := range in {
var (
- laddr net.Addr
- raddr net.Addr
- err error
+ addr string
+ network string
+ raddr net.Addr
+ err error
)
switch channelType := ch.ChannelType(); channelType {
case "forwarded-tcpip":
@@ -207,40 +240,34 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) {
continue
}
- // RFC 4254 section 7.2 specifies that incoming
- // addresses should list the address, in string
- // format. It is implied that this should be an IP
- // address, as it would be impossible to connect to it
- // otherwise.
- laddr, err = parseTCPAddr(payload.Addr, payload.Port)
- if err != nil {
- ch.Reject(ConnectionFailed, err.Error())
- continue
- }
+ // RFC 4254 section 7.2 specifies that incoming addresses should
+ // list the address that was connected, in string format. It is the
+ // same address used in the tcpip-forward request. The originator
+ // address is an IP address instead.
+ addr = net.JoinHostPort(payload.Addr, strconv.FormatUint(uint64(payload.Port), 10))
+
raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort)
if err != nil {
ch.Reject(ConnectionFailed, err.Error())
continue
}
-
+ network = "tcp"
case "forwarded-streamlocal@openssh.com":
var payload forwardedStreamLocalPayload
if err = Unmarshal(ch.ExtraData(), &payload); err != nil {
ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error())
continue
}
- laddr = &net.UnixAddr{
- Name: payload.SocketPath,
- Net: "unix",
- }
+ addr = payload.SocketPath
raddr = &net.UnixAddr{
Name: "@",
Net: "unix",
}
+ network = "unix"
default:
panic(fmt.Errorf("ssh: unknown channel type %s", channelType))
}
- if ok := l.forward(laddr, raddr, ch); !ok {
+ if ok := l.forward(network, addr, raddr, ch); !ok {
// Section 7.2, implementations MUST reject spurious incoming
// connections.
ch.Reject(Prohibited, "no forward for address")
@@ -252,11 +279,11 @@ func (l *forwardList) handleChannels(in <-chan NewChannel) {
// remove removes the forward entry, and the channel feeding its
// listener.
-func (l *forwardList) remove(addr net.Addr) {
+func (l *forwardList) remove(n, addr string) {
l.Lock()
defer l.Unlock()
for i, f := range l.entries {
- if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() {
+ if n == f.network && addr == f.addr {
l.entries = append(l.entries[:i], l.entries[i+1:]...)
close(f.c)
return
@@ -274,11 +301,11 @@ func (l *forwardList) closeAll() {
l.entries = nil
}
-func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool {
+func (l *forwardList) forward(n, addr string, raddr net.Addr, ch NewChannel) bool {
l.Lock()
defer l.Unlock()
for _, f := range l.entries {
- if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() {
+ if n == f.network && addr == f.addr {
f.c <- forward{newCh: ch, raddr: raddr}
return true
}
@@ -288,6 +315,7 @@ func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool {
type tcpListener struct {
laddr *net.TCPAddr
+ addr string
conn *Client
in <-chan forward
@@ -314,13 +342,21 @@ func (l *tcpListener) Accept() (net.Conn, error) {
// Close closes the listener.
func (l *tcpListener) Close() error {
+ host, port, err := net.SplitHostPort(l.addr)
+ if err != nil {
+ return err
+ }
+ rport, err := strconv.ParseUint(port, 10, 32)
+ if err != nil {
+ return err
+ }
m := channelForwardMsg{
- l.laddr.IP.String(),
- uint32(l.laddr.Port),
+ host,
+ uint32(rport),
}
// this also closes the listener.
- l.conn.forwards.remove(l.laddr)
+ l.conn.forwards.remove("tcp", l.addr)
ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
if err == nil && !ok {
err = errors.New("ssh: cancel-tcpip-forward failed")
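The practical effect for callers of this file's changes: Client.Listen now forwards whatever host string it is given, so a remote forward can be bound to a name that only the SSH server can resolve, and Addr() reports a placeholder when the host is not an IP. A hedged usage sketch; the hostname, port choice, and the already-established *ssh.Client are assumptions for illustration:

```go
package main

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func remoteForward(client *ssh.Client) (net.Listener, error) {
	// "localhost" is sent to the SSH server verbatim and resolved remotely,
	// not locally. Addr() therefore reports a zero IP, but the port is still
	// filled in: because we asked for port 0, the server picks one and the
	// response is folded back into the placeholder address.
	ln, err := client.Listen("tcp", "localhost:0")
	if err != nil {
		return nil, err
	}
	log.Printf("remote listener bound, local placeholder addr: %v", ln.Addr())
	return ln, nil
}
```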
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
index 66361984..fa3dd6a4 100644
--- a/vendor/golang.org/x/crypto/ssh/transport.go
+++ b/vendor/golang.org/x/crypto/ssh/transport.go
@@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"errors"
+ "fmt"
"io"
"log"
)
@@ -254,6 +255,9 @@ var (
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs DirectionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
+ if cipherMode == nil {
+ return nil, fmt.Errorf("ssh: unsupported cipher %v", algs.Cipher)
+ }
iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize)
diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go
index 2a0123d4..48dbd82a 100644
--- a/vendor/golang.org/x/mod/modfile/print.go
+++ b/vendor/golang.org/x/mod/modfile/print.go
@@ -33,7 +33,7 @@ type printer struct {
}
// printf prints to the buffer.
-func (p *printer) printf(format string, args ...interface{}) {
+func (p *printer) printf(format string, args ...any) {
fmt.Fprintf(p, format, args...)
}
diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go
index 2d748680..504a2f1d 100644
--- a/vendor/golang.org/x/mod/modfile/read.go
+++ b/vendor/golang.org/x/mod/modfile/read.go
@@ -94,7 +94,7 @@ func (x *FileSyntax) Span() (start, end Position) {
// line, the new line is added at the end of the block containing hint,
// extracting hint into a new block if it is not yet in one.
//
-// If the hint is non-nil buts its first token does not match,
+// If the hint is non-nil but its first token does not match,
// the new line is added after the block containing hint
// (or hint itself, if not in a block).
//
@@ -600,7 +600,7 @@ func (in *input) readToken() {
// Checked all punctuation. Must be identifier token.
if c := in.peekRune(); !isIdent(c) {
- in.Error(fmt.Sprintf("unexpected input character %#q", c))
+ in.Error(fmt.Sprintf("unexpected input character %#q", rune(c)))
}
// Scan over identifier.
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go
index a86ee4fd..c5b8305d 100644
--- a/vendor/golang.org/x/mod/modfile/rule.go
+++ b/vendor/golang.org/x/mod/modfile/rule.go
@@ -368,7 +368,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
Err: err,
})
}
- errorf := func(format string, args ...interface{}) {
+ errorf := func(format string, args ...any) {
wrapError(fmt.Errorf(format, args...))
}
@@ -574,7 +574,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V
Err: err,
}
}
- errorf := func(format string, args ...interface{}) *Error {
+ errorf := func(format string, args ...any) *Error {
return wrapError(fmt.Errorf(format, args...))
}
@@ -685,7 +685,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string,
Err: err,
})
}
- errorf := func(format string, args ...interface{}) {
+ errorf := func(format string, args ...any) {
wrapError(fmt.Errorf(format, args...))
}
@@ -1594,7 +1594,7 @@ func (f *File) AddRetract(vi VersionInterval, rationale string) error {
r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]")
}
if rationale != "" {
- for _, line := range strings.Split(rationale, "\n") {
+ for line := range strings.SplitSeq(rationale, "\n") {
com := Comment{Token: "// " + line}
r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com)
}
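strings.SplitSeq (added in Go 1.24) yields the pieces one at a time instead of allocating the whole slice, which is why the retract-rationale loop can drop the index variable without other changes. A quick stand-alone comparison; the rationale text is made up:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	rationale := "security issue\nsee the advisory for details"

	// Old form: allocates a []string holding every line up front.
	for _, line := range strings.Split(rationale, "\n") {
		fmt.Println("//", line)
	}

	// New form: an iterator that yields one line at a time, no slice.
	for line := range strings.SplitSeq(rationale, "\n") {
		fmt.Println("//", line)
	}
}
```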
diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go
index 16e1aa7a..739c13f4 100644
--- a/vendor/golang.org/x/mod/module/module.go
+++ b/vendor/golang.org/x/mod/module/module.go
@@ -261,7 +261,7 @@ func modPathOK(r rune) bool {
// importPathOK reports whether r can appear in a package import path element.
//
-// Import paths are intermediate between module paths and file paths: we allow
+// Import paths are intermediate between module paths and file paths: we
// disallow characters that would be confusing or ambiguous as arguments to
// 'go get' (such as '@' and ' ' ), but allow certain characters that are
// otherwise-unambiguous on the command line and historically used for some
@@ -802,8 +802,8 @@ func MatchPrefixPatterns(globs, target string) bool {
for globs != "" {
// Extract next non-empty glob in comma-separated list.
var glob string
- if i := strings.Index(globs, ","); i >= 0 {
- glob, globs = globs[:i], globs[i+1:]
+ if before, after, ok := strings.Cut(globs, ","); ok {
+ glob, globs = before, after
} else {
glob, globs = globs, ""
}
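strings.Cut expresses the same "split on the first comma" logic as the old Index arithmetic; found is false when no separator is present, which maps onto the else branch. A minimal sketch mirroring the rewritten loop body, with an invented helper name:

```go
package main

import (
	"fmt"
	"strings"
)

// nextGlob pops the next element from a comma-separated list, following the
// same shape as the rewritten MatchPrefixPatterns loop.
func nextGlob(globs string) (glob, rest string) {
	if before, after, ok := strings.Cut(globs, ","); ok {
		return before, after
	}
	return globs, ""
}

func main() {
	fmt.Println(nextGlob("example.com/*,rsc.io")) // "example.com/*" "rsc.io"
	fmt.Println(nextGlob("rsc.io"))               // "rsc.io" ""
}
```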
diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go
index 628f8fd6..824b282c 100644
--- a/vendor/golang.org/x/mod/semver/semver.go
+++ b/vendor/golang.org/x/mod/semver/semver.go
@@ -45,8 +45,8 @@ func IsValid(v string) bool {
// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
-// Two semantic versions compare equal only if their canonical formattings
-// are identical strings.
+// Two semantic versions compare equal only if their canonical formatting
+// is an identical string.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
p, ok := parse(v)
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index ca645d9a..8a7a89d0 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -27,6 +27,7 @@ import (
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
+ StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
@@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError,
}
- fillNetHTTPServerConfig(&conf, h1)
+ fillNetHTTPConfig(&conf, h1.HTTP2)
setConfigDefaults(&conf, true)
return conf
}
@@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
- MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
- MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
- MaxReadFrameSize: h2.MaxReadFrameSize,
- SendPingTimeout: h2.ReadIdleTimeout,
- PingTimeout: h2.PingTimeout,
- WriteByteTimeout: h2.WriteByteTimeout,
+ StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
@@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config {
}
if h2.t1 != nil {
- fillNetHTTPTransportConfig(&conf, h2.t1)
+ fillNetHTTPConfig(&conf, h2.t1.HTTP2)
}
setConfigDefaults(&conf, false)
return conf
@@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 {
const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead
}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if http2ConfigStrictMaxConcurrentRequests(h2) {
+ conf.StrictMaxConcurrentRequests = true
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
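With the pre-Go-1.24 fallback gone (see the deletions below), the only path by which server and transport knobs reach this package is the net/http HTTP2Config struct consumed by fillNetHTTPConfig above. A sketch of the caller side, assuming Go 1.24+ and using only fields that fillNetHTTPConfig reads; the values are arbitrary examples:

```go
package main

import (
	"net/http"
	"time"
)

func newServer() *http.Server {
	return &http.Server{
		Addr: ":8443",
		// These fields are picked up via srv.HTTP2 when the bundled/vendored
		// http2 package builds its internal config.
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			MaxReadFrameSize:     1 << 20,
			SendPingTimeout:      30 * time.Second, // maps to http2Config.SendPingTimeout
			PingTimeout:          15 * time.Second,
		},
	}
}

func main() {
	_ = newServer()
}
```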
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
deleted file mode 100644
index 5b516c55..00000000
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
- fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
- fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
- if h2 == nil {
- return
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxEncoderHeaderTableSize != 0 {
- conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
- }
- if h2.MaxDecoderHeaderTableSize != 0 {
- conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxReadFrameSize != 0 {
- conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
- }
- if h2.MaxReceiveBufferPerConnection != 0 {
- conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
- }
- if h2.MaxReceiveBufferPerStream != 0 {
- conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
- }
- if h2.SendPingTimeout != 0 {
- conf.SendPingTimeout = h2.SendPingTimeout
- }
- if h2.PingTimeout != 0 {
- conf.PingTimeout = h2.PingTimeout
- }
- if h2.WriteByteTimeout != 0 {
- conf.WriteByteTimeout = h2.WriteByteTimeout
- }
- if h2.PermitProhibitedCipherSuites {
- conf.PermitProhibitedCipherSuites = true
- }
- if h2.CountError != nil {
- conf.CountError = h2.CountError
- }
-}
diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go
new file mode 100644
index 00000000..b4373fe3
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go125.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go
new file mode 100644
index 00000000..6b071c14
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go126.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return h2.StrictMaxConcurrentRequests
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
deleted file mode 100644
index 060fd6c6..00000000
--- a/vendor/golang.org/x/net/http2/config_pre_go124.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index db3264da..9a4bd123 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -280,6 +280,8 @@ type Framer struct {
// lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32
+ // lastFrameType holds the type of the last frame for verifying frame order.
+ lastFrameType FrameType
maxReadSize uint32
headerBuf [frameHeaderLen]byte
@@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 {
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in in endWrite
+ 0, // 3 bytes of length, filled in endWrite
0,
0,
byte(ftype),
@@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool {
return err != nil
}
-// ReadFrame reads a single frame. The returned Frame is only valid
-// until the next call to ReadFrame.
+// ReadFrameHeader reads the header of the next frame.
+// It reads the 9-byte fixed frame header, and does not read any portion of the
+// frame payload. The caller is responsible for consuming the payload, either
+// with ReadFrameForHeader or directly from the Framer's io.Reader.
//
-// If the frame is larger than previously set with SetMaxReadFrameSize, the
-// returned error is ErrFrameTooLarge. Other errors may be of type
-// ConnectionError, StreamError, or anything else from the underlying
-// reader.
+// If the frame is larger than previously set with SetMaxReadFrameSize, it
+// returns the frame header and ErrFrameTooLarge.
//
-// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
-// indicates the stream responsible for the error.
-func (fr *Framer) ReadFrame() (Frame, error) {
+// If the returned FrameHeader.StreamID is non-zero, it indicates the stream
+// responsible for the error.
+func (fr *Framer) ReadFrameHeader() (FrameHeader, error) {
fr.errDetail = nil
- if fr.lastFrame != nil {
- fr.lastFrame.invalidate()
- }
fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
if err != nil {
- return nil, err
+ return fh, err
}
if fh.Length > fr.maxReadSize {
if fh == invalidHTTP1LookingFrameHeader() {
- return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
+ return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
}
- return nil, ErrFrameTooLarge
+ return fh, ErrFrameTooLarge
+ }
+ if err := fr.checkFrameOrder(fh); err != nil {
+ return fh, err
+ }
+ return fh, nil
+}
+
+// ReadFrameForHeader reads the payload for the frame with the given FrameHeader.
+//
+// It behaves identically to ReadFrame, other than not checking the maximum
+// frame size.
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) {
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
}
payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil {
@@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
}
return nil, err
}
- if err := fr.checkFrameOrder(f); err != nil {
- return nil, err
- }
+ fr.lastFrame = f
if fr.logReads {
fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
}
@@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return f, nil
}
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame or ReadFrameForHeader.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fh, err := fr.ReadFrameHeader()
+ if err != nil {
+ return nil, err
+ }
+ return fr.ReadFrameForHeader(fh)
+}
+
// connError returns ConnectionError(code) but first
// stashes away a public reason to the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug
@@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error {
// checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error {
- last := fr.lastFrame
- fr.lastFrame = f
+func (fr *Framer) checkFrameOrder(fh FrameHeader) error {
+ lastType := fr.lastFrameType
+ fr.lastFrameType = fh.Type
if fr.AllowIllegalReads {
return nil
}
- fh := f.Header()
if fr.lastHeaderStream != 0 {
if fh.Type != FrameContinuation {
return fr.connError(ErrCodeProtocol,
fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
fh.Type, fh.StreamID,
- last.Header().Type, fr.lastHeaderStream))
+ lastType, fr.lastHeaderStream))
}
if fh.StreamID != fr.lastHeaderStream {
return fr.connError(ErrCodeProtocol,
@@ -1152,7 +1180,16 @@ type PriorityFrame struct {
PriorityParam
}
-// PriorityParam are the stream prioritzation parameters.
+var defaultRFC9218Priority = PriorityParam{
+ incremental: 0,
+ urgency: 3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
+// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
// stream that this stream depends on. Zero means no
@@ -1167,6 +1204,20 @@ type PriorityParam struct {
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
+
+ // "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+ // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+ // priority. The default is 3."
+ urgency uint8
+
+ // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+ // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+ // incrementally, i.e., provide some meaningful output as chunks of the
+ // response arrive."
+ //
+ // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+ // avoid unnecessary type conversions and because either type takes 1 byte.
+ incremental uint8
}
func (p PriorityParam) IsZero() bool {
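The ReadFrame split lets a caller inspect the 9-byte header, including the declared payload length and stream ID, before committing to reading the payload. A sketch of the intended read loop using the two new methods, assuming an already connected net.Conn and abbreviated error handling:

```go
package framedump

import (
	"log"
	"net"

	"golang.org/x/net/http2"
)

func readFrames(conn net.Conn) error {
	fr := http2.NewFramer(conn, conn)
	for {
		fh, err := fr.ReadFrameHeader()
		if err != nil {
			// fh.StreamID, when non-zero, identifies the offending stream,
			// e.g. alongside ErrFrameTooLarge.
			return err
		}
		log.Printf("next frame: type=%v stream=%d length=%d", fh.Type, fh.StreamID, fh.Length)

		f, err := fr.ReadFrameForHeader(fh)
		if err != nil {
			return err
		}
		_ = f // process the frame; it is only valid until the next read
	}
}
```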
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f8..9921ca09 100644
--- a/vendor/golang.org/x/net/http2/gotrack.go
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@ import (
"runtime"
"strconv"
"sync"
+ "sync/atomic"
)
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return 0
}
return goroutineLock(curGoroutineID())
}
func (g goroutineLock) check() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@ func (g goroutineLock) check() {
}
func (g goroutineLock) checkNotOn() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() == uint64(g) {
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index ea5ae629..105fe12f 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -15,7 +15,6 @@ package http2 // import "golang.org/x/net/http2"
import (
"bufio"
- "context"
"crypto/tls"
"errors"
"fmt"
@@ -35,7 +34,6 @@ var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
- inTests bool
// Enabling extended CONNECT by causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
@@ -255,15 +253,13 @@ func (cw closeWaiter) Wait() {
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
- group synctestGroupInterface // immutable
- conn net.Conn // immutable
- bw *bufio.Writer // non-nil when data is buffered
- byteTimeout time.Duration // immutable, WriteByteTimeout
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
return &bufferedWriter{
- group: group,
conn: conn,
byteTimeout: timeout,
}
@@ -314,24 +310,18 @@ func (w *bufferedWriter) Flush() error {
type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
- return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+ return writeWithByteTimeout(w.conn, w.byteTimeout, p)
}
// writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection,
// the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 {
return conn.Write(p)
}
for {
- var now time.Time
- if group == nil {
- now = time.Now()
- } else {
- now = group.Now()
- }
- conn.SetWriteDeadline(now.Add(timeout))
+ conn.SetWriteDeadline(time.Now().Add(timeout))
nn, err := conn.Write(p[n:])
n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -417,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) {
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
- Join()
- Now() time.Time
- NewTimer(d time.Duration) timer
- AfterFunc(d time.Duration, f func()) timer
- ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 51fca38f..bdc5520e 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -176,44 +176,15 @@ type Server struct {
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-
- // Synchronization group used for testing.
- // Outside of tests, this is nil.
- group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
- if s.group != nil {
- s.group.Join()
- }
-}
-
-func (s *Server) now() time.Time {
- if s.group != nil {
- return s.group.Now()
- }
- return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
- if s.group != nil {
- return s.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
- if s.group != nil {
- return s.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
+
+ // Pool of error channels. This is per-Server rather than global
+ // because channels can't be reused across synctest bubbles.
+ errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
s.mu.Unlock()
}
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+ New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+ if s == nil {
+ return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+ }
+ return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+ if s == nil {
+ errChanPool.Put(ch) // Server used without calling ConfigureServer
+ return
+ }
+ s.errChanPool.Put(ch)
+}
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil {
conf = new(Server)
}
- conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ conf.state = &serverInternalState{
+ activeConns: make(map[*serverConn]struct{}),
+ errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+ }
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler {
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ if opts == nil {
+ opts = &ServeConnOpts{}
+ }
s.serveConn(c, opts, nil)
}
@@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+ bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -638,11 +636,11 @@ type serverConn struct {
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer timer // nil until used
- idleTimer timer // nil if unused
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
- readIdleTimer timer // nil if unused
+ readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -687,12 +685,12 @@ type stream struct {
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline timer // nil if unused
- writeDeadline timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +846,6 @@ type readFrameResult struct {
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- sc.srv.markNewGoroutine()
gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} }
for {
@@ -881,7 +878,6 @@ type frameWriteResult struct {
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
- sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) {
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
- sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
- lastFrameTime := sc.srv.now()
+ lastFrameTime := time.Now()
loopNum := 0
for {
loopNum++
@@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
- lastFrameTime = sc.srv.now()
+ lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
- now := sc.srv.now()
+ now := time.Now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
@@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error {
errc <- nil
}
}()
- timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C():
+ case <-timer.C:
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error {
}
}
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
+ ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed
}
}
- errChanPool.Put(ch)
+ sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() {
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = errChanPool.Get().(chan error)
+ errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil {
select {
case err := <-errc:
- errChanPool.Put(errc)
+ sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
if err == io.EOF {
b.sawEOF = true
}
- if b.conn == nil && inTests {
+ if b.conn == nil {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: errChanPool.Get().(chan error),
+ done: sc.srv.state.getErrChan(),
}
select {
@@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- errChanPool.Put(msg.done)
+ sc.srv.state.putErrChan(msg.done)
return err
}
}
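Independent of the http2 internals, the pooled one-buffered-channel pattern behind getErrChan/putErrChan looks like the sketch below: the buffer of one lets the producing goroutine deliver its result without blocking, and the channel goes back into the pool only after that result has been drained. Names here are invented for illustration:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errChans = sync.Pool{New: func() any { return make(chan error, 1) }}

func doAsync(work func() error) error {
	ch := errChans.Get().(chan error) // reuse a channel instead of allocating
	go func() { ch <- work() }()      // buffered send never blocks the worker
	err := <-ch
	errChans.Put(ch) // return it only once we know it is empty again
	return err
}

func main() {
	fmt.Println(doAsync(func() error { return nil }))
	fmt.Println(doAsync(func() error { return errors.New("boom") }))
}
```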
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
deleted file mode 100644
index 0b1c17b8..00000000
--- a/vendor/golang.org/x/net/http2/timer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
- C() <-chan time.Time
- Reset(d time.Duration) bool
- Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
- *time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f26356b9..ccb87e6d 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -9,6 +9,7 @@ package http2
import (
"bufio"
"bytes"
+ "compress/flate"
"compress/gzip"
"context"
"crypto/rand"
@@ -193,50 +194,6 @@ type Transport struct {
type transportTestHooks struct {
newclientconn func(*ClientConn)
- group synctestGroupInterface
-}
-
-func (t *Transport) markNewGoroutine() {
- if t != nil && t.transportTestHooks != nil {
- t.transportTestHooks.group.Join()
- }
-}
-
-func (t *Transport) now() time.Time {
- if t != nil && t.transportTestHooks != nil {
- return t.transportTestHooks.group.Now()
- }
- return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
- if t != nil && t.transportTestHooks != nil {
- return t.now().Sub(when)
- }
- return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
- }
- return context.WithTimeout(ctx, d)
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +323,7 @@ type ClientConn struct {
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
- idleTimer timer
+ idleTimer *time.Timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -399,6 +356,7 @@ type ClientConn struct {
readIdleTimeout time.Duration
pingTimeout time.Duration
extendedConnectAllowed bool
+ strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive.
@@ -418,11 +376,24 @@ type ClientConn struct {
// completely unresponsive connection.
pendingResets int
+ // readBeforeStreamID is the smallest stream ID that has not been followed by
+ // a frame read from the peer. We use this to determine when a request may
+ // have been sent to a completely unresponsive connection:
+ // If the request ID is less than readBeforeStreamID, then we have had some
+ // indication of life on the connection since sending the request.
+ readBeforeStreamID uint32
+
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
// Lock reqmu BEFORE mu or wmu.
reqHeaderMu chan struct{}
+ // internalStateHook reports state changes back to the net/http.ClientConn.
+ // Note that this is different from the user state hook registered by
+ // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn,
+ // which calls the user hook.
+ internalStateHook func()
+
// wmu is held while writing.
// Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
// Only acquire both at the same time when changing peer settings.
@@ -534,14 +505,12 @@ func (cs *clientStream) closeReqBodyLocked() {
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
go func() {
- cs.cc.t.markNewGoroutine()
cs.reqBody.Close()
close(reqBodyClosed)
}()
}
type stickyErrWriter struct {
- group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -551,7 +520,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
*sew.err = err
return n, err
}
@@ -650,9 +619,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- tm := t.newTimer(d)
+ tm := time.NewTimer(d)
select {
- case <-tm.C():
+ case <-tm.C:
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
@@ -699,6 +668,7 @@ var (
errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnNotEstablished = errors.New("http2: client conn could not be established")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -753,7 +723,7 @@ func canRetryError(err error) bool {
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
if t.transportTestHooks != nil {
- return t.newClientConn(nil, singleUse)
+ return t.newClientConn(nil, singleUse, nil)
}
host, _, err := net.SplitHostPort(addr)
if err != nil {
@@ -763,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
- return t.newClientConn(tconn, singleUse)
+ return t.newClientConn(tconn, singleUse, nil)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -815,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
- return t.newClientConn(c, t.disableKeepAlives())
+ return t.newClientConn(c, t.disableKeepAlives(), nil)
}
-func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) {
conf := configFromTransport(t)
cc := &ClientConn{
t: t,
@@ -829,7 +799,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
seenSettingsChan: make(chan struct{}),
@@ -838,14 +809,12 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
- lastActive: t.now(),
+ lastActive: time.Now(),
+ internalStateHook: internalStateHook,
}
- var group synctestGroupInterface
if t.transportTestHooks != nil {
- t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
- group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -857,7 +826,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
- group: group,
conn: c,
timeout: conf.WriteByteTimeout,
err: &cc.werr,
@@ -906,7 +874,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// Start the idle timer after the connection is fully initialized.
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
- cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
go cc.readLoop()
@@ -917,7 +885,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1067,7 +1035,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
var maxConcurrentOkay bool
- if cc.t.StrictMaxConcurrentStreams {
+ if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before
@@ -1083,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
}
- st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
- !cc.doNotReuse &&
- int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
- !cc.tooIdleLocked()
+ st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked()
// If this connection has never been used for a request and is closed,
// then let it take a request (which will fail).
@@ -1102,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
+func (cc *ClientConn) isUsableLocked() bool {
+ return cc.goAway == nil &&
+ !cc.closed &&
+ !cc.closing &&
+ !cc.doNotReuse &&
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+ !cc.tooIdleLocked()
+}
+
+// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn.
+//
+// This follows slightly different rules than clientConnIdleState.canTakeNewRequest.
+// We only permit reservations up to the conn's concurrency limit.
+// This differs from ClientConn.ReserveNewRequest, which permits reservations
+// past the limit when StrictMaxConcurrentStreams is set.
+func (cc *ClientConn) canReserveLocked() bool {
+ if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) {
+ return false
+ }
+ if !cc.isUsableLocked() {
+ return false
+ }
+ return true
+}
+
// currentRequestCountLocked reports the number of concurrency slots currently in use,
// including active streams, reserved slots, and reset streams waiting for acknowledgement.
func (cc *ClientConn) currentRequestCountLocked() int {
@@ -1113,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
return st.canTakeNewRequest
}
+// availableLocked reports the number of concurrency slots available.
+func (cc *ClientConn) availableLocked() int {
+ if !cc.canTakeNewRequestLocked() {
+ return 0
+ }
+ return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked())
+}
+
// tooIdleLocked reports whether this connection has been sitting idle
// for too much wall time.
func (cc *ClientConn) tooIdleLocked() bool {
@@ -1120,7 +1118,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
// times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1137,6 +1135,7 @@ func (cc *ClientConn) closeConn() {
t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
defer t.Stop()
cc.tconn.Close()
+ cc.maybeCallStateHook()
}
// A tls.Conn.Close can hang for a long time if the peer is unresponsive.
@@ -1186,7 +1185,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
done := make(chan struct{})
cancelled := false // guarded by cc.mu
go func() {
- cc.t.markNewGoroutine()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1257,8 +1255,7 @@ func (cc *ClientConn) closeForError(err error) {
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
- err := errors.New("http2: client connection force closed via ClientConn.Close")
- cc.closeForError(err)
+ cc.closeForError(errClientConnForceClosed)
return nil
}
@@ -1427,7 +1424,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
- cs.cc.t.markNewGoroutine()
err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err)
}
@@ -1558,9 +1554,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := cc.t.newTimer(d)
+ timer := time.NewTimer(d)
defer timer.Stop()
- respHeaderTimer = timer.C()
+ respHeaderTimer = timer.C
respHeaderRecv = cs.respHeaderRecv
}
// Wait until the peer half-closes its end of the stream,
@@ -1665,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
}
bodyClosed := cs.reqBodyClosed
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
+ // Have we read any frames from the connection since sending this request?
+ readSinceStream := cc.readBeforeStreamID > cs.ID
cc.mu.Unlock()
if mustCloseBody {
cs.reqBody.Close()
@@ -1696,8 +1694,10 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
//
// This could be due to the server becoming unresponsive.
// To avoid sending too many requests on a dead connection,
- // we let the request continue to consume a concurrency slot
- // until we can confirm the server is still responding.
+ // if we haven't read any frames from the connection since
+ // sending this request, we let it continue to consume
+ // a concurrency slot until we can confirm the server is
+ // still responding.
// We do this by sending a PING frame along with the RST_STREAM
// (unless a ping is already in flight).
//
@@ -1708,7 +1708,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// because it's short lived and will probably be closed before
// we get the ping response.
ping := false
- if !closeOnIdle {
+ if !closeOnIdle && !readSinceStream {
cc.mu.Lock()
// rstStreamPingsBlocked works around a gRPC behavior:
// see comment on the field for details.
@@ -1742,6 +1742,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
}
close(cs.donec)
+ cc.maybeCallStateHook()
}
// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
@@ -1753,7 +1754,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
// Return a fatal error which aborts the retry loop.
return errClientConnNotEstablished
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
@@ -2092,10 +2093,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = cc.t.now()
+ cc.lastIdle = time.Now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
@@ -2121,7 +2122,6 @@ type clientConnReadLoop struct {
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
- cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -2188,9 +2188,9 @@ func (rl *clientConnReadLoop) cleanup() {
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout
}
- idleTime := cc.t.now().Sub(cc.lastActive)
+ idleTime := time.Now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
- cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})
} else {
@@ -2250,9 +2250,9 @@ func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.readIdleTimeout
- var t timer
+ var t *time.Timer
if readIdleTimeout != 0 {
- t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2795,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt
// See comment on ClientConn.rstStreamPingsBlocked for details.
rl.cc.rstStreamPingsBlocked = false
}
+ rl.cc.readBeforeStreamID = rl.cc.nextStreamID
cs := rl.cc.streams[id]
if cs != nil && !cs.readAborted {
return cs
@@ -2845,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
cc := rl.cc
+ defer cc.maybeCallStateHook()
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -2998,7 +3000,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
var pingError error
errc := make(chan struct{})
go func() {
- cc.t.markNewGoroutine()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3026,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() {
cc := rl.cc
+ defer cc.maybeCallStateHook()
cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
@@ -3128,35 +3130,102 @@ type erringRoundTripper struct{ err error }
func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body")
+
// gzipReader wraps a response body so it can lazily
-// call gzip.NewReader on the first call to Read
+// get a gzip.Reader from the pool on the first call to Read.
+// After Close is called, it returns the gzip.Reader to the pool immediately
+// if no Read is in progress, or later when the in-flight Read completes.
type gzipReader struct {
_ incomparable
body io.ReadCloser // underlying Response.Body
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // sticky error
+ mu sync.Mutex // guards zr and zerr
+ zr *gzip.Reader // stores gzip reader from the pool between reads
+ zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close
}
-func (gz *gzipReader) Read(p []byte) (n int, err error) {
+type eofReader struct{}
+
+func (eofReader) Read([]byte) (int, error) { return 0, io.EOF }
+func (eofReader) ReadByte() (byte, error) { return 0, io.EOF }
+
+var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }}
+
+// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r.
+func gzipPoolGet(r io.Reader) (*gzip.Reader, error) {
+ zr := gzipPool.Get().(*gzip.Reader)
+ if err := zr.Reset(r); err != nil {
+ gzipPoolPut(zr)
+ return nil, err
+ }
+ return zr, nil
+}
+
+// gzipPoolPut puts a gzip.Reader back into the pool.
+func gzipPoolPut(zr *gzip.Reader) {
+ // Reset will allocate bufio.Reader if we pass it anything
+ // other than a flate.Reader, so ensure that it's getting one.
+ var r flate.Reader = eofReader{}
+ zr.Reset(r)
+ gzipPool.Put(zr)
+}
+
+// acquire returns a gzip.Reader for reading response body.
+// The reader must be released after use.
+func (gz *gzipReader) acquire() (*gzip.Reader, error) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
if gz.zerr != nil {
- return 0, gz.zerr
+ return nil, gz.zerr
}
if gz.zr == nil {
- gz.zr, err = gzip.NewReader(gz.body)
- if err != nil {
- gz.zerr = err
- return 0, err
+ gz.zr, gz.zerr = gzipPoolGet(gz.body)
+ if gz.zerr != nil {
+ return nil, gz.zerr
}
}
- return gz.zr.Read(p)
+ ret := gz.zr
+ gz.zr, gz.zerr = nil, errConcurrentReadOnResBody
+ return ret, nil
}
-func (gz *gzipReader) Close() error {
- if err := gz.body.Close(); err != nil {
- return err
+// release returns the gzip.Reader to the pool if Close was called during Read.
+func (gz *gzipReader) release(zr *gzip.Reader) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == errConcurrentReadOnResBody {
+ gz.zr, gz.zerr = zr, nil
+ } else { // fs.ErrClosed
+ gzipPoolPut(zr)
+ }
+}
+
+// close returns the gzip.Reader to the pool immediately or
+// signals release to do so after Read completes.
+func (gz *gzipReader) close() {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == nil && gz.zr != nil {
+ gzipPoolPut(gz.zr)
+ gz.zr = nil
}
gz.zerr = fs.ErrClosed
- return nil
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ zr, err := gz.acquire()
+ if err != nil {
+ return 0, err
+ }
+ defer gz.release(zr)
+
+ return zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ gz.close()
+
+ return gz.body.Close()
}
type errorReader struct{ err error }
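The acquire/release/close dance above lets Close run concurrently with Read without handing a gzip.Reader back to the pool while a Read still holds it. For readers unfamiliar with the pooling side, here is a standalone sketch of the same sync.Pool idiom in isolation, assuming gzip-compressed input in data; it repeats the eofReader trick so that the Reset before pooling never allocates a bufio.Reader. This is an illustration only, not part of the vendored change.

package gzippool

import (
	"bytes"
	"compress/flate"
	"compress/gzip"
	"io"
	"sync"
)

// eofReader satisfies flate.Reader (io.Reader + io.ByteReader) while always
// reporting EOF, so gzip.Reader.Reset never wraps it in a new bufio.Reader.
type eofReader struct{}

func (eofReader) Read([]byte) (int, error) { return 0, io.EOF }
func (eofReader) ReadByte() (byte, error)  { return 0, io.EOF }

var pool = sync.Pool{New: func() any { return new(gzip.Reader) }}

// putReader parks the reader on an empty source, dropping references to the
// previous body, and returns it to the pool.
func putReader(zr *gzip.Reader) {
	var empty flate.Reader = eofReader{}
	zr.Reset(empty)
	pool.Put(zr)
}

// decompress inflates data using a pooled gzip.Reader.
func decompress(data []byte) ([]byte, error) {
	zr := pool.Get().(*gzip.Reader)
	if err := zr.Reset(bytes.NewReader(data)); err != nil {
		putReader(zr)
		return nil, err
	}
	defer putReader(zr)
	return io.ReadAll(zr)
}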
@@ -3182,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro
}
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
-// if there's already has a cached connection to the host.
+// if there's already a cached connection to the host.
// (The field is exported so it can be accessed via reflect from net/http; tested
// by TestNoDialH2RoundTripperType)
+//
+// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol,
+// and the http1.Transport can use type assertions to call non-RoundTrip methods on it.
+// This lets us expose, for example, NewClientConn to net/http.
type noDialH2RoundTripper struct{ *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -3195,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err
return res, err
}
+func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) {
+ tr := rt.Transport
+ cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook)
+ if err != nil {
+ return nil, err
+ }
+
+ // RoundTrip should block when the conn is at its concurrency limit,
+ // not return an error. Setting strictMaxConcurrentStreams enables this.
+ cc.strictMaxConcurrentStreams = true
+
+ return netHTTPClientConn{cc}, nil
+}
+
+// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from
+// the RoundTripper returned by NewClientConn.
+type netHTTPClientConn struct {
+ cc *ClientConn
+}
+
+func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ return cc.cc.RoundTrip(req)
+}
+
+func (cc netHTTPClientConn) Close() error {
+ return cc.cc.Close()
+}
+
+func (cc netHTTPClientConn) Err() error {
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ if cc.cc.closed {
+ return errors.New("connection closed")
+ }
+ return nil
+}
+
+func (cc netHTTPClientConn) Reserve() error {
+ defer cc.cc.maybeCallStateHook()
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ if !cc.cc.canReserveLocked() {
+ return errors.New("connection is unavailable")
+ }
+ cc.cc.streamsReserved++
+ return nil
+}
+
+func (cc netHTTPClientConn) Release() {
+ defer cc.cc.maybeCallStateHook()
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ // We don't complain if streamsReserved is 0.
+ //
+ // This is consistent with RoundTrip: both Release and RoundTrip will
+ // consume a reservation iff one exists.
+ if cc.cc.streamsReserved > 0 {
+ cc.cc.streamsReserved--
+ }
+}
+
+func (cc netHTTPClientConn) Available() int {
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ return cc.cc.availableLocked()
+}
+
+func (cc netHTTPClientConn) InFlight() int {
+ cc.cc.mu.Lock()
+ defer cc.cc.mu.Unlock()
+ return cc.cc.currentRequestCountLocked()
+}
+
+func (cc *ClientConn) maybeCallStateHook() {
+ if cc.internalStateHook != nil {
+ cc.internalStateHook()
+ }
+}
+
func (t *Transport) idleConnTimeout() time.Duration {
// to keep things backwards compatible, we use non-zero values of
// IdleConnTimeout, followed by using the IdleConnTimeout on the underlying
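The RoundTripper returned by noDialH2RoundTripper.NewClientConn above is the unexported netHTTPClientConn, so its caller (net/http, in practice) reaches Reserve, Release, Available, InFlight and Err through interface assertions. A minimal consumer-side sketch follows; the http2ClientConn interface and dispatch helper are defined here purely for illustration and are not part of this change or of the net/http API.

package h2hook

import "net/http"

// http2ClientConn mirrors the methods netHTTPClientConn exposes above.
type http2ClientConn interface {
	http.RoundTripper
	Reserve() error // take one concurrency slot up front
	Release()       // hand back an unused reservation
	Available() int // free concurrency slots right now
	InFlight() int  // active streams plus reservations
	Err() error     // non-nil once the connection has closed
	Close() error
}

// dispatch reserves a slot before issuing the request, falling back to a plain
// RoundTrip when the transport does not expose the richer interface.
func dispatch(rt http.RoundTripper, req *http.Request) (*http.Response, error) {
	cc, ok := rt.(http2ClientConn)
	if !ok {
		return rt.RoundTrip(req)
	}
	if err := cc.Reserve(); err != nil {
		return nil, err // at the limit or unusable; the caller can dial a new conn
	}
	// Per the comment on Release above, both RoundTrip and Release consume a
	// reservation if one exists, so the slot taken here is spent by RoundTrip.
	return cc.RoundTrip(req)
}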
@@ -3228,7 +3380,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = cc.t.timeSince(cc.lastActive)
+ ci.IdleTime = time.Since(cc.lastActive)
}
cc.mu.Unlock()
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index cc893adc..7de27be5 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -42,6 +42,8 @@ type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
+ // priority is used to set the priority of the newly opened stream.
+ priority PriorityParam
}
// FrameWriteRequest is a request to write a frame.
@@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
}
// writeQueue is used by implementations of WriteScheduler.
+//
+// Each writeQueue contains a queue of FrameWriteRequests, meant to store all
+// FrameWriteRequests associated with a given stream. This is implemented as a
+// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done
+// by incrementing currPos of currQueue. Adding an item is done by appending it
+// to the nextQueue. If currQueue is empty when trying to remove an item, we
+// can swap currQueue and nextQueue to remedy the situation.
+// This two-stage queue is analogous to the use of two lists in Okasaki's
+// purely functional queue but without the overhead of reversing the list when
+// swapping stages.
+//
+// writeQueue also contains prev and next pointers, which implementations of
+// WriteScheduler can use to build data structures that represent the order of
+// writing between different streams (e.g. a circular linked list).
type writeQueue struct {
- s []FrameWriteRequest
+ currQueue []FrameWriteRequest
+ nextQueue []FrameWriteRequest
+ currPos int
+
prev, next *writeQueue
}
-func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+func (q *writeQueue) empty() bool {
+ return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0
+}
func (q *writeQueue) push(wr FrameWriteRequest) {
- q.s = append(q.s, wr)
+ q.nextQueue = append(q.nextQueue, wr)
}
func (q *writeQueue) shift() FrameWriteRequest {
- if len(q.s) == 0 {
+ if q.empty() {
panic("invalid use of queue")
}
- wr := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = FrameWriteRequest{}
- q.s = q.s[:len(q.s)-1]
+ if q.currPos >= len(q.currQueue) {
+ q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0]
+ }
+ wr := q.currQueue[q.currPos]
+ q.currQueue[q.currPos] = FrameWriteRequest{}
+ q.currPos++
return wr
}
+func (q *writeQueue) peek() *FrameWriteRequest {
+ if q.currPos < len(q.currQueue) {
+ return &q.currQueue[q.currPos]
+ }
+ if len(q.nextQueue) > 0 {
+ return &q.nextQueue[0]
+ }
+ return nil
+}
+
// consume consumes up to n bytes from the frame at the head of the queue. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
- if len(q.s) == 0 {
+ if q.empty() {
return FrameWriteRequest{}, false
}
- consumed, rest, numresult := q.s[0].Consume(n)
+ consumed, rest, numresult := q.peek().Consume(n)
switch numresult {
case 0:
return FrameWriteRequest{}, false
case 1:
q.shift()
case 2:
- q.s[0] = rest
+ *q.peek() = rest
}
return consumed, true
}
@@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue
// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
- for i := range q.s {
- q.s[i] = FrameWriteRequest{}
+ for i := range q.currQueue {
+ q.currQueue[i] = FrameWriteRequest{}
+ }
+ for i := range q.nextQueue {
+ q.nextQueue[i] = FrameWriteRequest{}
}
- q.s = q.s[:0]
+ q.currQueue = q.currQueue[:0]
+ q.nextQueue = q.nextQueue[:0]
+ q.currPos = 0
*p = append(*p, q)
}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
similarity index 77%
rename from vendor/golang.org/x/net/http2/writesched_priority.go
rename to vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
index f6783339..4e33c29a 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -11,7 +11,7 @@ import (
)
// RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
@@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
}
}
- ws := &priorityWriteScheduler{
- nodes: make(map[uint32]*priorityNode),
+ ws := &priorityWriteSchedulerRFC7540{
+ nodes: make(map[uint32]*priorityNodeRFC7540),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
return ws
}
-type priorityNodeState int
+type priorityNodeStateRFC7540 int
const (
- priorityNodeOpen priorityNodeState = iota
- priorityNodeClosed
- priorityNodeIdle
+ priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+ priorityNodeClosedRFC7540
+ priorityNodeIdleRFC7540
)
-// priorityNode is a node in an HTTP/2 priority tree.
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
-type priorityNode struct {
- q writeQueue // queue of pending frames to write
- id uint32 // id of the stream, or 0 for the root of the tree
- weight uint8 // the actual weight is weight+1, so the value is in [1,256]
- state priorityNodeState // open | closed | idle
- bytes int64 // number of bytes written by this node, or 0 if closed
- subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+type priorityNodeRFC7540 struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeStateRFC7540 // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
- parent *priorityNode
- kids *priorityNode // start of the kids list
- prev, next *priorityNode // doubly-linked list of siblings
+ parent *priorityNodeRFC7540
+ kids *priorityNodeRFC7540 // start of the kids list
+ prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
}
-func (n *priorityNode) setParent(parent *priorityNode) {
+func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
if n == parent {
panic("setParent to self")
}
@@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
}
}
-func (n *priorityNode) addBytes(b int64) {
+func (n *priorityNodeRFC7540) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
@@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
@@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
- openParent = openParent || (n.state == priorityNodeOpen)
+ openParent = openParent || (n.state == priorityNodeOpenRFC7540)
}
// Common case: only one kid or all kids have the same weight.
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
- sort.Sort(sortPriorityNodeSiblings(*tmp))
+ sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
@@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
return false
}
-type sortPriorityNodeSiblings []*priorityNode
+type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
-func (z sortPriorityNodeSiblings) Len() int { return len(z) }
-func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
+func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
- wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
return wi >= wk
}
@@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
return bi/bk <= wi/wk
}
-type priorityWriteScheduler struct {
+type priorityWriteSchedulerRFC7540 struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
- root priorityNode
+ root priorityNodeRFC7540
// nodes maps stream ids to priority tree nodes.
- nodes map[uint32]*priorityNode
+ nodes map[uint32]*priorityNodeRFC7540
// maxID is the maximum stream id in nodes.
maxID uint32
@@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
- closedNodes, idleNodes []*priorityNode
+ closedNodes, idleNodes []*priorityNodeRFC7540
// From the config.
maxClosedNodesInTree int
@@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
- tmp []*priorityNode
+ tmp []*priorityNodeRFC7540
// pool of empty queues for reuse.
queuePool writeQueuePool
}
-func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
- if curr.state != priorityNodeIdle {
+ if curr.state != priorityNodeIdleRFC7540 {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
- curr.state = priorityNodeOpen
+ curr.state = priorityNodeOpenRFC7540
return
}
@@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
if parent == nil {
parent = &ws.root
}
- n := &priorityNode{
+ n := &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeOpen,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeOpenRFC7540,
}
n.setParent(parent)
ws.nodes[streamID] = n
@@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
}
}
-func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
- if ws.nodes[streamID].state != priorityNodeOpen {
+ if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
- n.state = priorityNodeClosed
+ n.state = priorityNodeClosedRFC7540
n.addBytes(-n.bytes)
q := n.q
ws.queuePool.put(&q)
- n.q.s = nil
if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else {
@@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
}
}
-func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
@@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
return
}
ws.maxID = streamID
- n = &priorityNode{
+ n = &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeIdle,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeIdleRFC7540,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
@@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
- n.weight = priorityDefaultWeight
+ n.weight = priorityDefaultWeightRFC7540
return
}
@@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
n.weight = priority.Weight
}
-func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
- var n *priorityNode
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+ var n *priorityNodeRFC7540
if wr.isControl() {
n = &ws.root
} else {
@@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
n.q.push(wr)
}
-func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
- ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
@@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
return wr, ok
}
-func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
if maxSize == 0 {
return
}
@@ -442,7 +441,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
*list = append(*list, n)
}
-func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
for n.kids != nil {
n.kids.setParent(n.parent)
}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
new file mode 100644
index 00000000..dfbfc1eb
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
@@ -0,0 +1,224 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type streamMetadata struct {
+ location *writeQueue
+ priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+ // heads contains the heads of the circular lists of streams.
+ // We put these heads within a nested array that represents urgency and
+ // incremental, as defined in
+ // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+ // 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
+ heads [8][2]*writeQueue
+
+ // streams maps each stream ID to its metadata, so we can quickly locate
+ // a stream when we need to, for example, adjust its priority.
+ streams map[uint32]streamMetadata
+
+ // queuePool are empty queues for reuse.
+ queuePool writeQueuePool
+
+ // prioritizeIncremental is used to determine whether we should prioritize
+ // incremental streams or not, when urgency is the same in a given Pop()
+ // call.
+ prioritizeIncremental bool
+
+ // priorityUpdateBuf is used to buffer the most recent PRIORITY_UPDATE we
+ // receive per https://www.rfc-editor.org/rfc/rfc9218.html#name-the-priority_update-frame.
+ priorityUpdateBuf struct {
+ // streamID being 0 means that the buffer is empty. This is a safe
+ // assumption as PRIORITY_UPDATE for stream 0 is a PROTOCOL_ERROR.
+ streamID uint32
+ priority PriorityParam
+ }
+}
+
+func newPriorityWriteSchedulerRFC9218() WriteScheduler {
+ ws := &priorityWriteSchedulerRFC9218{
+ streams: make(map[uint32]streamMetadata),
+ }
+ return ws
+}
+
+func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+ if ws.streams[streamID].location != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ if streamID == ws.priorityUpdateBuf.streamID {
+ ws.priorityUpdateBuf.streamID = 0
+ opt.priority = ws.priorityUpdateBuf.priority
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: opt.priority,
+ }
+
+ u, i := opt.priority.urgency, opt.priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ ws.priorityUpdateBuf.streamID = streamID
+ ws.priorityUpdateBuf.priority = priority
+ return
+ }
+
+ // Remove stream from current location.
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+
+ // Insert stream to the new queue.
+ u, i = priority.urgency, priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+
+ // Update the metadata.
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: priority,
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()].location
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+
+ // On the next Pop(), we want to prioritize incremental requests if we
+ // prioritized non-incremental requests of the same urgency this time, and vice versa.
+ // i.e. when there are incremental and non-incremental requests at the same
+ // priority, we give 50% of our bandwidth to the incremental ones in
+ // aggregate and 50% to the first non-incremental one (since
+ // non-incremental streams do not use round-robin writes).
+ ws.prioritizeIncremental = !ws.prioritizeIncremental
+
+ // Always prioritize lowest u (i.e. highest urgency level).
+ for u := range ws.heads {
+ for i := range ws.heads[u] {
+ // When we want to prioritize incremental, we try to pop i=true
+ // first before i=false when u is the same.
+ if ws.prioritizeIncremental {
+ i = (i + 1) % 2
+ }
+ q := ws.heads[u][i]
+ if q == nil {
+ continue
+ }
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if i == 1 {
+ // For incremental streams, we update head to q.next so
+ // we can round-robin between multiple streams that can
+ // immediately benefit from partial writes.
+ ws.heads[u][i] = q.next
+ } else {
+ // For non-incremental streams, we try to finish one to
+ // completion rather than doing round-robin. However,
+ // we update head here so that if q.consume() is !ok
+ // (e.g. the stream has no more frame to consume), head
+ // is updated to the next q that has frames to consume
+ // on future iterations. This way, we do not prioritize
+ // writing to unavailable stream on next Pop() calls,
+ // preventing head-of-line blocking.
+ ws.heads[u][i] = q
+ }
+ return wr, true
+ }
+ q = q.next
+ if q == ws.heads[u][i] {
+ break
+ }
+ }
+
+ }
+ }
+ return FrameWriteRequest{}, false
+}
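OpenStream, CloseStream and AdjustStream above all manipulate the same intrusive circular doubly-linked list rooted at heads[u][i]. A stripped-down sketch of that insert/remove logic, with a simplified node type that is not part of the vendored code:

package ring

type node struct {
	prev, next *node
}

// insert appends q to the ring whose head is *head (which may be nil),
// placing it just before the head, i.e. at the tail of the list.
func insert(head **node, q *node) {
	if *head == nil {
		*head = q
		q.prev, q.next = q, q
		return
	}
	q.prev = (*head).prev
	q.next = *head
	q.prev.next = q
	q.next.prev = q
}

// remove unlinks q from the ring whose head is *head, advancing the head
// when q was the head itself.
func remove(head **node, q *node) {
	if q.next == q { // q was the only element
		*head = nil
		return
	}
	q.prev.next = q.next
	q.next.prev = q.prev
	if *head == q {
		*head = q.next
	}
}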
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
index 54fe8632..737cff9e 100644
--- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
-// The round robin scheduler priorizes control frames
+// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
index 4b705531..1e10f89e 100644
--- a/vendor/golang.org/x/net/internal/httpcommon/request.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
DefaultUserAgent string
}
-// EncodeHeadersParam is the result of EncodeHeaders.
+// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
@@ -399,7 +399,7 @@ type ServerRequestResult struct {
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
- // It might be a bit odd to return errors this way rather than returing an error,
+ // It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
index 3aaffdd1..c2b3c009 100644
--- a/vendor/golang.org/x/net/trace/events.go
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -58,8 +58,8 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
Buckets: buckets,
}
- data.Families = make([]string, 0, len(families))
famMu.RLock()
+ data.Families = make([]string, 0, len(families))
for name := range families {
data.Families = append(data.Families, name)
}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
index 22cc9984..3b0450a0 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s
@@ -9,31 +9,27 @@
// func getisar0() uint64
TEXT ·getisar0(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 0 into x0
- // mrs x0, ID_AA64ISAR0_EL1 = d5380600
- WORD $0xd5380600
+ MRS ID_AA64ISAR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getisar1() uint64
TEXT ·getisar1(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 1 into x0
- // mrs x0, ID_AA64ISAR1_EL1 = d5380620
- WORD $0xd5380620
+ MRS ID_AA64ISAR1_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getpfr0() uint64
TEXT ·getpfr0(SB),NOSPLIT,$0-8
// get Processor Feature Register 0 into x0
- // mrs x0, ID_AA64PFR0_EL1 = d5380400
- WORD $0xd5380400
+ MRS ID_AA64PFR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getzfr0() uint64
TEXT ·getzfr0(SB),NOSPLIT,$0-8
// get SVE Feature Register 0 into x0
- // mrs x0, ID_AA64ZFR0_EL1 = d5380480
- WORD $0xd5380480
+ MRS ID_AA64ZFR0_EL1, R0
MOVD R0, ret+0(FP)
RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index 1e642f33..f5723d4f 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -64,6 +64,80 @@ func initOptions() {
func archInit() {
+ // From internal/cpu
+ const (
+ // eax bits
+ cpuid_AVXVNNI = 1 << 4
+
+ // ecx bits
+ cpuid_SSE3 = 1 << 0
+ cpuid_PCLMULQDQ = 1 << 1
+ cpuid_AVX512VBMI = 1 << 1
+ cpuid_AVX512VBMI2 = 1 << 6
+ cpuid_SSSE3 = 1 << 9
+ cpuid_AVX512GFNI = 1 << 8
+ cpuid_AVX512VAES = 1 << 9
+ cpuid_AVX512VNNI = 1 << 11
+ cpuid_AVX512BITALG = 1 << 12
+ cpuid_FMA = 1 << 12
+ cpuid_AVX512VPOPCNTDQ = 1 << 14
+ cpuid_SSE41 = 1 << 19
+ cpuid_SSE42 = 1 << 20
+ cpuid_POPCNT = 1 << 23
+ cpuid_AES = 1 << 25
+ cpuid_OSXSAVE = 1 << 27
+ cpuid_AVX = 1 << 28
+
+ // "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0
+ cpuid_BMI1 = 1 << 3
+ cpuid_AVX2 = 1 << 5
+ cpuid_BMI2 = 1 << 8
+ cpuid_ERMS = 1 << 9
+ cpuid_AVX512F = 1 << 16
+ cpuid_AVX512DQ = 1 << 17
+ cpuid_ADX = 1 << 19
+ cpuid_AVX512CD = 1 << 28
+ cpuid_SHA = 1 << 29
+ cpuid_AVX512BW = 1 << 30
+ cpuid_AVX512VL = 1 << 31
+
+ // "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0
+ cpuid_AVX512_VBMI = 1 << 1
+ cpuid_AVX512_VBMI2 = 1 << 6
+ cpuid_GFNI = 1 << 8
+ cpuid_AVX512VPCLMULQDQ = 1 << 10
+ cpuid_AVX512_BITALG = 1 << 12
+
+ // edx bits
+ cpuid_FSRM = 1 << 4
+ // edx bits for CPUID 0x80000001
+ cpuid_RDTSCP = 1 << 27
+ )
+ // Additional constants not in internal/cpu
+ const (
+ // eax=1: edx
+ cpuid_SSE2 = 1 << 26
+ // eax=1: ecx
+ cpuid_CX16 = 1 << 13
+ cpuid_RDRAND = 1 << 30
+ // eax=7,ecx=0: ebx
+ cpuid_RDSEED = 1 << 18
+ cpuid_AVX512IFMA = 1 << 21
+ cpuid_AVX512PF = 1 << 26
+ cpuid_AVX512ER = 1 << 27
+ // eax=7,ecx=0: edx
+ cpuid_AVX5124VNNIW = 1 << 2
+ cpuid_AVX5124FMAPS = 1 << 3
+ cpuid_AMXBF16 = 1 << 22
+ cpuid_AMXTile = 1 << 24
+ cpuid_AMXInt8 = 1 << 25
+ // eax=7,ecx=1: eax
+ cpuid_AVX512BF16 = 1 << 5
+ cpuid_AVXIFMA = 1 << 23
+ // eax=7,ecx=1: edx
+ cpuid_AVXVNNIInt8 = 1 << 4
+ )
+
Initialized = true
maxID, _, _, _ := cpuid(0, 0)
@@ -73,90 +147,90 @@ func archInit() {
}
_, _, ecx1, edx1 := cpuid(1, 0)
- X86.HasSSE2 = isSet(26, edx1)
-
- X86.HasSSE3 = isSet(0, ecx1)
- X86.HasPCLMULQDQ = isSet(1, ecx1)
- X86.HasSSSE3 = isSet(9, ecx1)
- X86.HasFMA = isSet(12, ecx1)
- X86.HasCX16 = isSet(13, ecx1)
- X86.HasSSE41 = isSet(19, ecx1)
- X86.HasSSE42 = isSet(20, ecx1)
- X86.HasPOPCNT = isSet(23, ecx1)
- X86.HasAES = isSet(25, ecx1)
- X86.HasOSXSAVE = isSet(27, ecx1)
- X86.HasRDRAND = isSet(30, ecx1)
+ X86.HasSSE2 = isSet(edx1, cpuid_SSE2)
+
+ X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
+ X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
+ X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
+ X86.HasFMA = isSet(ecx1, cpuid_FMA)
+ X86.HasCX16 = isSet(ecx1, cpuid_CX16)
+ X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
+ X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
+ X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
+ X86.HasAES = isSet(ecx1, cpuid_AES)
+ X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
+ X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND)
var osSupportsAVX, osSupportsAVX512 bool
// For XGETBV, OSXSAVE bit is required and sufficient.
if X86.HasOSXSAVE {
eax, _ := xgetbv()
// Check if XMM and YMM registers have OS support.
- osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+ osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
if runtime.GOOS == "darwin" {
// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else {
// Check if OPMASK and ZMM registers have OS support.
- osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
+ osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7)
}
}
- X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+ X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
if maxID < 7 {
return
}
eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
- X86.HasBMI1 = isSet(3, ebx7)
- X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
- X86.HasBMI2 = isSet(8, ebx7)
- X86.HasERMS = isSet(9, ebx7)
- X86.HasRDSEED = isSet(18, ebx7)
- X86.HasADX = isSet(19, ebx7)
-
- X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
+ X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
+ X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
+ X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
+ X86.HasERMS = isSet(ebx7, cpuid_ERMS)
+ X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED)
+ X86.HasADX = isSet(ebx7, cpuid_ADX)
+
+ X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
if X86.HasAVX512 {
X86.HasAVX512F = true
- X86.HasAVX512CD = isSet(28, ebx7)
- X86.HasAVX512ER = isSet(27, ebx7)
- X86.HasAVX512PF = isSet(26, ebx7)
- X86.HasAVX512VL = isSet(31, ebx7)
- X86.HasAVX512BW = isSet(30, ebx7)
- X86.HasAVX512DQ = isSet(17, ebx7)
- X86.HasAVX512IFMA = isSet(21, ebx7)
- X86.HasAVX512VBMI = isSet(1, ecx7)
- X86.HasAVX5124VNNIW = isSet(2, edx7)
- X86.HasAVX5124FMAPS = isSet(3, edx7)
- X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
- X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
- X86.HasAVX512VNNI = isSet(11, ecx7)
- X86.HasAVX512GFNI = isSet(8, ecx7)
- X86.HasAVX512VAES = isSet(9, ecx7)
- X86.HasAVX512VBMI2 = isSet(6, ecx7)
- X86.HasAVX512BITALG = isSet(12, ecx7)
+ X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD)
+ X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER)
+ X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF)
+ X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
+ X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
+ X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ)
+ X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA)
+ X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI)
+ X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW)
+ X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS)
+ X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ)
+ X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ)
+ X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI)
+ X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI)
+ X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES)
+ X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2)
+ X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG)
}
- X86.HasAMXTile = isSet(24, edx7)
- X86.HasAMXInt8 = isSet(25, edx7)
- X86.HasAMXBF16 = isSet(22, edx7)
+ X86.HasAMXTile = isSet(edx7, cpuid_AMXTile)
+ X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8)
+ X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16)
// These features depend on the second level of extended features.
if eax7 >= 1 {
eax71, _, _, edx71 := cpuid(7, 1)
if X86.HasAVX512 {
- X86.HasAVX512BF16 = isSet(5, eax71)
+ X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16)
}
if X86.HasAVX {
- X86.HasAVXIFMA = isSet(23, eax71)
- X86.HasAVXVNNI = isSet(4, eax71)
- X86.HasAVXVNNIInt8 = isSet(4, edx71)
+ X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA)
+ X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI)
+ X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8)
}
}
}
-func isSet(bitpos uint, value uint32) bool {
- return value&(1<<bitpos) != 0
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
#include
#include
+#include
#include
#include
#include
@@ -255,6 +256,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -529,6 +531,7 @@ ccflags="$@"
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+ $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ ||
$2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
@@ -611,7 +614,7 @@ ccflags="$@"
$2 !~ /IOC_MAGIC/ &&
$2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ ||
$2 ~ /^(VM|VMADDR)_/ ||
- $2 ~ /^IOCTL_VM_SOCKETS_/ ||
+ $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ ||
$2 ~ /^(TASKSTATS|TS)_/ ||
$2 ~ /^CGROUPSTATS_/ ||
$2 ~ /^GENL_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 9439af96..06c0eea6 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -2643,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
//sys Mseal(b []byte, flags uint) (err error)
+
+//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY
+
+func SetMemPolicy(mode int, mask *CPUSet) error {
+ return setMemPolicy(mode, mask, _CPU_SETSIZE)
+}
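A hedged usage sketch for the new SetMemPolicy wrapper (Linux only). The node mask reuses unix.CPUSet, matching the signature added above; the mode is written as a numeric literal because this hunk does not show MPOL_* constants, so prefer such a constant if your x/sys version provides one (2 corresponds to MPOL_BIND in <linux/mempolicy.h>).

package numa

import "golang.org/x/sys/unix"

// bindToNode0 restricts future memory allocations of the calling thread to
// NUMA node 0 via set_mempolicy(2).
func bindToNode0() error {
	var nodes unix.CPUSet // reused here as a node mask, as in the wrapper above
	nodes.Zero()
	nodes.Set(0)
	const mpolBind = 2 // MPOL_BIND; use the named constant if available
	return unix.SetMemPolicy(mpolBind, &nodes)
}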
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index b6db27d9..120a7b35 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -853,20 +853,86 @@ const (
DM_VERSION_MAJOR = 0x4
DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0
+ DT_ADDRRNGHI = 0x6ffffeff
+ DT_ADDRRNGLO = 0x6ffffe00
DT_BLK = 0x6
DT_CHR = 0x2
+ DT_DEBUG = 0x15
DT_DIR = 0x4
+ DT_ENCODING = 0x20
DT_FIFO = 0x1
+ DT_FINI = 0xd
+ DT_FLAGS_1 = 0x6ffffffb
+ DT_GNU_HASH = 0x6ffffef5
+ DT_HASH = 0x4
+ DT_HIOS = 0x6ffff000
+ DT_HIPROC = 0x7fffffff
+ DT_INIT = 0xc
+ DT_JMPREL = 0x17
DT_LNK = 0xa
+ DT_LOOS = 0x6000000d
+ DT_LOPROC = 0x70000000
+ DT_NEEDED = 0x1
+ DT_NULL = 0x0
+ DT_PLTGOT = 0x3
+ DT_PLTREL = 0x14
+ DT_PLTRELSZ = 0x2
DT_REG = 0x8
+ DT_REL = 0x11
+ DT_RELA = 0x7
+ DT_RELACOUNT = 0x6ffffff9
+ DT_RELAENT = 0x9
+ DT_RELASZ = 0x8
+ DT_RELCOUNT = 0x6ffffffa
+ DT_RELENT = 0x13
+ DT_RELSZ = 0x12
+ DT_RPATH = 0xf
DT_SOCK = 0xc
+ DT_SONAME = 0xe
+ DT_STRSZ = 0xa
+ DT_STRTAB = 0x5
+ DT_SYMBOLIC = 0x10
+ DT_SYMENT = 0xb
+ DT_SYMTAB = 0x6
+ DT_TEXTREL = 0x16
DT_UNKNOWN = 0x0
+ DT_VALRNGHI = 0x6ffffdff
+ DT_VALRNGLO = 0x6ffffd00
+ DT_VERDEF = 0x6ffffffc
+ DT_VERDEFNUM = 0x6ffffffd
+ DT_VERNEED = 0x6ffffffe
+ DT_VERNEEDNUM = 0x6fffffff
+ DT_VERSYM = 0x6ffffff0
DT_WHT = 0xe
ECHO = 0x8
ECRYPTFS_SUPER_MAGIC = 0xf15f
EFD_SEMAPHORE = 0x1
EFIVARFS_MAGIC = 0xde5e81e4
EFS_SUPER_MAGIC = 0x414a53
+ EI_CLASS = 0x4
+ EI_DATA = 0x5
+ EI_MAG0 = 0x0
+ EI_MAG1 = 0x1
+ EI_MAG2 = 0x2
+ EI_MAG3 = 0x3
+ EI_NIDENT = 0x10
+ EI_OSABI = 0x7
+ EI_PAD = 0x8
+ EI_VERSION = 0x6
+ ELFCLASS32 = 0x1
+ ELFCLASS64 = 0x2
+ ELFCLASSNONE = 0x0
+ ELFCLASSNUM = 0x3
+ ELFDATA2LSB = 0x1
+ ELFDATA2MSB = 0x2
+ ELFDATANONE = 0x0
+ ELFMAG = "\177ELF"
+ ELFMAG0 = 0x7f
+ ELFMAG1 = 'E'
+ ELFMAG2 = 'L'
+ ELFMAG3 = 'F'
+ ELFOSABI_LINUX = 0x3
+ ELFOSABI_NONE = 0x0
EM_386 = 0x3
EM_486 = 0x6
EM_68K = 0x4
@@ -1152,14 +1218,24 @@ const (
ETH_P_WCCP = 0x883e
ETH_P_X25 = 0x805
ETH_P_XDSA = 0xf8
+ ET_CORE = 0x4
+ ET_DYN = 0x3
+ ET_EXEC = 0x2
+ ET_HIPROC = 0xffff
+ ET_LOPROC = 0xff00
+ ET_NONE = 0x0
+ ET_REL = 0x1
EV_ABS = 0x3
EV_CNT = 0x20
+ EV_CURRENT = 0x1
EV_FF = 0x15
EV_FF_STATUS = 0x17
EV_KEY = 0x1
EV_LED = 0x11
EV_MAX = 0x1f
EV_MSC = 0x4
+ EV_NONE = 0x0
+ EV_NUM = 0x2
EV_PWR = 0x16
EV_REL = 0x2
EV_REP = 0x14
@@ -1539,6 +1615,8 @@ const (
IN_OPEN = 0x20
IN_Q_OVERFLOW = 0x4000
IN_UNMOUNT = 0x2000
+ IOCTL_MEI_CONNECT_CLIENT = 0xc0104801
+ IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804
IPPROTO_AH = 0x33
IPPROTO_BEETPH = 0x5e
IPPROTO_COMP = 0x6c
@@ -2276,7 +2354,167 @@ const (
NLM_F_REPLACE = 0x100
NLM_F_REQUEST = 0x1
NLM_F_ROOT = 0x100
+ NN_386_IOPERM = "LINUX"
+ NN_386_TLS = "LINUX"
+ NN_ARC_V2 = "LINUX"
+ NN_ARM_FPMR = "LINUX"
+ NN_ARM_GCS = "LINUX"
+ NN_ARM_HW_BREAK = "LINUX"
+ NN_ARM_HW_WATCH = "LINUX"
+ NN_ARM_PACA_KEYS = "LINUX"
+ NN_ARM_PACG_KEYS = "LINUX"
+ NN_ARM_PAC_ENABLED_KEYS = "LINUX"
+ NN_ARM_PAC_MASK = "LINUX"
+ NN_ARM_POE = "LINUX"
+ NN_ARM_SSVE = "LINUX"
+ NN_ARM_SVE = "LINUX"
+ NN_ARM_SYSTEM_CALL = "LINUX"
+ NN_ARM_TAGGED_ADDR_CTRL = "LINUX"
+ NN_ARM_TLS = "LINUX"
+ NN_ARM_VFP = "LINUX"
+ NN_ARM_ZA = "LINUX"
+ NN_ARM_ZT = "LINUX"
+ NN_AUXV = "CORE"
+ NN_FILE = "CORE"
+ NN_GNU_PROPERTY_TYPE_0 = "GNU"
+ NN_LOONGARCH_CPUCFG = "LINUX"
+ NN_LOONGARCH_CSR = "LINUX"
+ NN_LOONGARCH_HW_BREAK = "LINUX"
+ NN_LOONGARCH_HW_WATCH = "LINUX"
+ NN_LOONGARCH_LASX = "LINUX"
+ NN_LOONGARCH_LBT = "LINUX"
+ NN_LOONGARCH_LSX = "LINUX"
+ NN_MIPS_DSP = "LINUX"
+ NN_MIPS_FP_MODE = "LINUX"
+ NN_MIPS_MSA = "LINUX"
+ NN_PPC_DEXCR = "LINUX"
+ NN_PPC_DSCR = "LINUX"
+ NN_PPC_EBB = "LINUX"
+ NN_PPC_HASHKEYR = "LINUX"
+ NN_PPC_PKEY = "LINUX"
+ NN_PPC_PMU = "LINUX"
+ NN_PPC_PPR = "LINUX"
+ NN_PPC_SPE = "LINUX"
+ NN_PPC_TAR = "LINUX"
+ NN_PPC_TM_CDSCR = "LINUX"
+ NN_PPC_TM_CFPR = "LINUX"
+ NN_PPC_TM_CGPR = "LINUX"
+ NN_PPC_TM_CPPR = "LINUX"
+ NN_PPC_TM_CTAR = "LINUX"
+ NN_PPC_TM_CVMX = "LINUX"
+ NN_PPC_TM_CVSX = "LINUX"
+ NN_PPC_TM_SPR = "LINUX"
+ NN_PPC_VMX = "LINUX"
+ NN_PPC_VSX = "LINUX"
+ NN_PRFPREG = "CORE"
+ NN_PRPSINFO = "CORE"
+ NN_PRSTATUS = "CORE"
+ NN_PRXFPREG = "LINUX"
+ NN_RISCV_CSR = "LINUX"
+ NN_RISCV_TAGGED_ADDR_CTRL = "LINUX"
+ NN_RISCV_VECTOR = "LINUX"
+ NN_S390_CTRS = "LINUX"
+ NN_S390_GS_BC = "LINUX"
+ NN_S390_GS_CB = "LINUX"
+ NN_S390_HIGH_GPRS = "LINUX"
+ NN_S390_LAST_BREAK = "LINUX"
+ NN_S390_PREFIX = "LINUX"
+ NN_S390_PV_CPU_DATA = "LINUX"
+ NN_S390_RI_CB = "LINUX"
+ NN_S390_SYSTEM_CALL = "LINUX"
+ NN_S390_TDB = "LINUX"
+ NN_S390_TIMER = "LINUX"
+ NN_S390_TODCMP = "LINUX"
+ NN_S390_TODPREG = "LINUX"
+ NN_S390_VXRS_HIGH = "LINUX"
+ NN_S390_VXRS_LOW = "LINUX"
+ NN_SIGINFO = "CORE"
+ NN_TASKSTRUCT = "CORE"
+ NN_VMCOREDD = "LINUX"
+ NN_X86_SHSTK = "LINUX"
+ NN_X86_XSAVE_LAYOUT = "LINUX"
+ NN_X86_XSTATE = "LINUX"
NSFS_MAGIC = 0x6e736673
+ NT_386_IOPERM = 0x201
+ NT_386_TLS = 0x200
+ NT_ARC_V2 = 0x600
+ NT_ARM_FPMR = 0x40e
+ NT_ARM_GCS = 0x410
+ NT_ARM_HW_BREAK = 0x402
+ NT_ARM_HW_WATCH = 0x403
+ NT_ARM_PACA_KEYS = 0x407
+ NT_ARM_PACG_KEYS = 0x408
+ NT_ARM_PAC_ENABLED_KEYS = 0x40a
+ NT_ARM_PAC_MASK = 0x406
+ NT_ARM_POE = 0x40f
+ NT_ARM_SSVE = 0x40b
+ NT_ARM_SVE = 0x405
+ NT_ARM_SYSTEM_CALL = 0x404
+ NT_ARM_TAGGED_ADDR_CTRL = 0x409
+ NT_ARM_TLS = 0x401
+ NT_ARM_VFP = 0x400
+ NT_ARM_ZA = 0x40c
+ NT_ARM_ZT = 0x40d
+ NT_AUXV = 0x6
+ NT_FILE = 0x46494c45
+ NT_GNU_PROPERTY_TYPE_0 = 0x5
+ NT_LOONGARCH_CPUCFG = 0xa00
+ NT_LOONGARCH_CSR = 0xa01
+ NT_LOONGARCH_HW_BREAK = 0xa05
+ NT_LOONGARCH_HW_WATCH = 0xa06
+ NT_LOONGARCH_LASX = 0xa03
+ NT_LOONGARCH_LBT = 0xa04
+ NT_LOONGARCH_LSX = 0xa02
+ NT_MIPS_DSP = 0x800
+ NT_MIPS_FP_MODE = 0x801
+ NT_MIPS_MSA = 0x802
+ NT_PPC_DEXCR = 0x111
+ NT_PPC_DSCR = 0x105
+ NT_PPC_EBB = 0x106
+ NT_PPC_HASHKEYR = 0x112
+ NT_PPC_PKEY = 0x110
+ NT_PPC_PMU = 0x107
+ NT_PPC_PPR = 0x104
+ NT_PPC_SPE = 0x101
+ NT_PPC_TAR = 0x103
+ NT_PPC_TM_CDSCR = 0x10f
+ NT_PPC_TM_CFPR = 0x109
+ NT_PPC_TM_CGPR = 0x108
+ NT_PPC_TM_CPPR = 0x10e
+ NT_PPC_TM_CTAR = 0x10d
+ NT_PPC_TM_CVMX = 0x10a
+ NT_PPC_TM_CVSX = 0x10b
+ NT_PPC_TM_SPR = 0x10c
+ NT_PPC_VMX = 0x100
+ NT_PPC_VSX = 0x102
+ NT_PRFPREG = 0x2
+ NT_PRPSINFO = 0x3
+ NT_PRSTATUS = 0x1
+ NT_PRXFPREG = 0x46e62b7f
+ NT_RISCV_CSR = 0x900
+ NT_RISCV_TAGGED_ADDR_CTRL = 0x902
+ NT_RISCV_VECTOR = 0x901
+ NT_S390_CTRS = 0x304
+ NT_S390_GS_BC = 0x30c
+ NT_S390_GS_CB = 0x30b
+ NT_S390_HIGH_GPRS = 0x300
+ NT_S390_LAST_BREAK = 0x306
+ NT_S390_PREFIX = 0x305
+ NT_S390_PV_CPU_DATA = 0x30e
+ NT_S390_RI_CB = 0x30d
+ NT_S390_SYSTEM_CALL = 0x307
+ NT_S390_TDB = 0x308
+ NT_S390_TIMER = 0x301
+ NT_S390_TODCMP = 0x302
+ NT_S390_TODPREG = 0x303
+ NT_S390_VXRS_HIGH = 0x30a
+ NT_S390_VXRS_LOW = 0x309
+ NT_SIGINFO = 0x53494749
+ NT_TASKSTRUCT = 0x4
+ NT_VMCOREDD = 0x700
+ NT_X86_SHSTK = 0x204
+ NT_X86_XSAVE_LAYOUT = 0x205
+ NT_X86_XSTATE = 0x202
OCFS2_SUPER_MAGIC = 0x7461636f
OCRNL = 0x8
OFDEL = 0x80
@@ -2463,6 +2701,59 @@ const (
PERF_RECORD_MISC_USER = 0x2
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
+ PF_ALG = 0x26
+ PF_APPLETALK = 0x5
+ PF_ASH = 0x12
+ PF_ATMPVC = 0x8
+ PF_ATMSVC = 0x14
+ PF_AX25 = 0x3
+ PF_BLUETOOTH = 0x1f
+ PF_BRIDGE = 0x7
+ PF_CAIF = 0x25
+ PF_CAN = 0x1d
+ PF_DECnet = 0xc
+ PF_ECONET = 0x13
+ PF_FILE = 0x1
+ PF_IB = 0x1b
+ PF_IEEE802154 = 0x24
+ PF_INET = 0x2
+ PF_INET6 = 0xa
+ PF_IPX = 0x4
+ PF_IRDA = 0x17
+ PF_ISDN = 0x22
+ PF_IUCV = 0x20
+ PF_KCM = 0x29
+ PF_KEY = 0xf
+ PF_LLC = 0x1a
+ PF_LOCAL = 0x1
+ PF_MAX = 0x2e
+ PF_MCTP = 0x2d
+ PF_MPLS = 0x1c
+ PF_NETBEUI = 0xd
+ PF_NETLINK = 0x10
+ PF_NETROM = 0x6
+ PF_NFC = 0x27
+ PF_PACKET = 0x11
+ PF_PHONET = 0x23
+ PF_PPPOX = 0x18
+ PF_QIPCRTR = 0x2a
+ PF_R = 0x4
+ PF_RDS = 0x15
+ PF_ROSE = 0xb
+ PF_ROUTE = 0x10
+ PF_RXRPC = 0x21
+ PF_SECURITY = 0xe
+ PF_SMC = 0x2b
+ PF_SNA = 0x16
+ PF_TIPC = 0x1e
+ PF_UNIX = 0x1
+ PF_UNSPEC = 0x0
+ PF_VSOCK = 0x28
+ PF_W = 0x2
+ PF_WANPIPE = 0x19
+ PF_X = 0x1
+ PF_X25 = 0x9
+ PF_XDP = 0x2c
PID_FS_MAGIC = 0x50494446
PIPEFS_MAGIC = 0x50495045
PPPIOCGNPMODE = 0xc008744c
@@ -2758,6 +3049,23 @@ const (
PTRACE_SYSCALL_INFO_NONE = 0x0
PTRACE_SYSCALL_INFO_SECCOMP = 0x3
PTRACE_TRACEME = 0x0
+ PT_AARCH64_MEMTAG_MTE = 0x70000002
+ PT_DYNAMIC = 0x2
+ PT_GNU_EH_FRAME = 0x6474e550
+ PT_GNU_PROPERTY = 0x6474e553
+ PT_GNU_RELRO = 0x6474e552
+ PT_GNU_STACK = 0x6474e551
+ PT_HIOS = 0x6fffffff
+ PT_HIPROC = 0x7fffffff
+ PT_INTERP = 0x3
+ PT_LOAD = 0x1
+ PT_LOOS = 0x60000000
+ PT_LOPROC = 0x70000000
+ PT_NOTE = 0x4
+ PT_NULL = 0x0
+ PT_PHDR = 0x6
+ PT_SHLIB = 0x5
+ PT_TLS = 0x7
P_ALL = 0x0
P_PGID = 0x2
P_PID = 0x1
@@ -3091,6 +3399,47 @@ const (
SEEK_MAX = 0x4
SEEK_SET = 0x0
SELINUX_MAGIC = 0xf97cff8c
+ SHF_ALLOC = 0x2
+ SHF_EXCLUDE = 0x8000000
+ SHF_EXECINSTR = 0x4
+ SHF_GROUP = 0x200
+ SHF_INFO_LINK = 0x40
+ SHF_LINK_ORDER = 0x80
+ SHF_MASKOS = 0xff00000
+ SHF_MASKPROC = 0xf0000000
+ SHF_MERGE = 0x10
+ SHF_ORDERED = 0x4000000
+ SHF_OS_NONCONFORMING = 0x100
+ SHF_RELA_LIVEPATCH = 0x100000
+ SHF_RO_AFTER_INIT = 0x200000
+ SHF_STRINGS = 0x20
+ SHF_TLS = 0x400
+ SHF_WRITE = 0x1
+ SHN_ABS = 0xfff1
+ SHN_COMMON = 0xfff2
+ SHN_HIPROC = 0xff1f
+ SHN_HIRESERVE = 0xffff
+ SHN_LIVEPATCH = 0xff20
+ SHN_LOPROC = 0xff00
+ SHN_LORESERVE = 0xff00
+ SHN_UNDEF = 0x0
+ SHT_DYNAMIC = 0x6
+ SHT_DYNSYM = 0xb
+ SHT_HASH = 0x5
+ SHT_HIPROC = 0x7fffffff
+ SHT_HIUSER = 0xffffffff
+ SHT_LOPROC = 0x70000000
+ SHT_LOUSER = 0x80000000
+ SHT_NOBITS = 0x8
+ SHT_NOTE = 0x7
+ SHT_NULL = 0x0
+ SHT_NUM = 0xc
+ SHT_PROGBITS = 0x1
+ SHT_REL = 0x9
+ SHT_RELA = 0x4
+ SHT_SHLIB = 0xa
+ SHT_STRTAB = 0x3
+ SHT_SYMTAB = 0x2
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -3317,6 +3666,16 @@ const (
STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
+ STB_GLOBAL = 0x1
+ STB_LOCAL = 0x0
+ STB_WEAK = 0x2
+ STT_COMMON = 0x5
+ STT_FILE = 0x4
+ STT_FUNC = 0x2
+ STT_NOTYPE = 0x0
+ STT_OBJECT = 0x1
+ STT_SECTION = 0x3
+ STT_TLS = 0x6
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
SYNC_FILE_RANGE_WRITE = 0x2
@@ -3553,6 +3912,8 @@ const (
UTIME_OMIT = 0x3ffffffe
V9FS_MAGIC = 0x1021997
VERASE = 0x2
+ VER_FLG_BASE = 0x1
+ VER_FLG_WEAK = 0x2
VINTR = 0x0
VKILL = 0x3
VLNEXT = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 1c37f9fb..97a61fc5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -116,6 +116,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x80044803
+ IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 6f54d34a..a0d6d498 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -116,6 +116,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x80044803
+ IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 783ec5c1..dd9c903f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x80044803
+ IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index ca83d3ba..384c61ca 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -120,6 +120,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x80044803
+ IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 607e611c..6384c983 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -116,6 +116,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x80044803
+ IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index b9cb5bd3..553c1c6f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x100
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 65b078a6..b3339f20 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x100
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 5298a303..177091d2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x100
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 7bc557c8..c5abf156 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x100
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 152399bb..f1f3fadf 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x400
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 1a1ce240..203ad9c5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x400
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 4231a1fb..4b9abcb2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x400
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 21c0e952..f8798303 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x80044803
+ IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index f00d1cd7..64347eb3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -115,6 +115,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
+ IOCTL_MEI_NOTIFY_GET = 0x80044803
+ IOCTL_MEI_NOTIFY_SET = 0x40044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index bc8d539e..7d719117 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -119,6 +119,8 @@ const (
IEXTEN = 0x8000
IN_CLOEXEC = 0x400000
IN_NONBLOCK = 0x4000
+ IOCTL_MEI_NOTIFY_GET = 0x40044803
+ IOCTL_MEI_NOTIFY_SET = 0x80044802
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 5cc1e8eb..8935d10a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setMemPolicy(mode int, mask *CPUSet, size int) (err error) {
+ _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 944e75a1..c1a46701 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -3590,6 +3590,8 @@ type Nhmsg struct {
Flags uint32
}
+const SizeofNhmsg = 0x8
+
type NexthopGrp struct {
Id uint32
Weight uint8
@@ -3597,6 +3599,8 @@ type NexthopGrp struct {
Resvd2 uint16
}
+const SizeofNexthopGrp = 0x8
+
const (
NHA_UNSPEC = 0x0
NHA_ID = 0x1
@@ -6332,3 +6336,30 @@ type SockDiagReq struct {
}
const RTM_NEWNVLAN = 0x70
+
+const (
+ MPOL_BIND = 0x2
+ MPOL_DEFAULT = 0x0
+ MPOL_F_ADDR = 0x2
+ MPOL_F_MEMS_ALLOWED = 0x4
+ MPOL_F_MOF = 0x8
+ MPOL_F_MORON = 0x10
+ MPOL_F_NODE = 0x1
+ MPOL_F_NUMA_BALANCING = 0x2000
+ MPOL_F_RELATIVE_NODES = 0x4000
+ MPOL_F_SHARED = 0x1
+ MPOL_F_STATIC_NODES = 0x8000
+ MPOL_INTERLEAVE = 0x3
+ MPOL_LOCAL = 0x4
+ MPOL_MAX = 0x7
+ MPOL_MF_INTERNAL = 0x10
+ MPOL_MF_LAZY = 0x8
+ MPOL_MF_MOVE_ALL = 0x4
+ MPOL_MF_MOVE = 0x2
+ MPOL_MF_STRICT = 0x1
+ MPOL_MF_VALID = 0x7
+ MPOL_MODE_FLAGS = 0xe000
+ MPOL_PREFERRED = 0x1
+ MPOL_PREFERRED_MANY = 0x5
+ MPOL_WEIGHTED_INTERLEAVE = 0x6
+)
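
[editor's note — not part of the diff] The MPOL_* constants added above mirror the kernel's set_mempolicy(2) memory-policy flags. A minimal sketch of how a caller might exercise them on linux/amd64 via the raw syscall is below; the exported wrapper is not shown in this hunk, so the raw `unix.Syscall` form and the node-0 mask are illustrative assumptions.

```go
// Sketch only: bind the calling thread's future page allocations to NUMA
// node 0 using the MPOL_* constants added above. Assumes linux/amd64,
// where unix.SYS_SET_MEMPOLICY is defined.
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	var nodemask [1]uint64
	nodemask[0] = 1 << 0 // allow NUMA node 0 only
	_, _, errno := unix.Syscall(unix.SYS_SET_MEMPOLICY,
		uintptr(unix.MPOL_BIND),
		uintptr(unsafe.Pointer(&nodemask[0])),
		64) // maxnode: number of bits in the mask
	if errno != 0 {
		fmt.Println("set_mempolicy failed:", errno)
	}
}
```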
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
index 439548ec..50e8e644 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
@@ -104,7 +104,7 @@ type Statvfs_t struct {
Fsid uint32
Namemax uint32
Owner uint32
- Spare [4]uint32
+ Spare [4]uint64
Fstypename [32]byte
Mntonname [1024]byte
Mntfromname [1024]byte
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index bd513373..69439df2 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -892,8 +892,12 @@ const socket_error = uintptr(^uint32(0))
//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2
+//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2
//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable
//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2
//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
@@ -916,6 +920,17 @@ type RawSockaddrInet6 struct {
Scope_id uint32
}
+// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See
+// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet.
+//
+// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using
+// unsafe, depending on the address family.
+type RawSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Data [6]uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
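
[editor's note — not part of the diff] The new RawSockaddrInet doc comment above describes converting the union to RawSockaddrInet4 or RawSockaddrInet6 with unsafe, depending on the address family. A minimal sketch of that conversion follows; the helper name `sockaddrInetToAddr` is ours, not part of x/sys.

```go
//go:build windows

// Sketch only: reinterpret a RawSockaddrInet based on its Family field,
// as suggested by the doc comment added in this diff.
package sockaddrutil

import (
	"net/netip"
	"unsafe"

	"golang.org/x/sys/windows"
)

func sockaddrInetToAddr(sa *windows.RawSockaddrInet) (netip.Addr, bool) {
	switch sa.Family {
	case windows.AF_INET:
		sa4 := (*windows.RawSockaddrInet4)(unsafe.Pointer(sa))
		return netip.AddrFrom4(sa4.Addr), true
	case windows.AF_INET6:
		sa6 := (*windows.RawSockaddrInet6)(unsafe.Pointer(sa))
		return netip.AddrFrom16(sa6.Addr), true
	default:
		return netip.Addr{}, false
	}
}
```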
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 358be3c7..6e4f50eb 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -2320,6 +2320,82 @@ type MibIfRow2 struct {
OutQLen uint64
}
+// IP_ADDRESS_PREFIX stores an IP address prefix. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix.
+type IpAddressPrefix struct {
+ Prefix RawSockaddrInet
+ PrefixLength uint8
+}
+
+// NL_ROUTE_ORIGIN enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin.
+const (
+ NlroManual = 0
+ NlroWellKnown = 1
+ NlroDHCP = 2
+ NlroRouterAdvertisement = 3
+ Nlro6to4 = 4
+)
+
+// NL_ROUTE_PROTOCOL enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol.
+const (
+ MIB_IPPROTO_OTHER = 1
+ MIB_IPPROTO_LOCAL = 2
+ MIB_IPPROTO_NETMGMT = 3
+ MIB_IPPROTO_ICMP = 4
+ MIB_IPPROTO_EGP = 5
+ MIB_IPPROTO_GGP = 6
+ MIB_IPPROTO_HELLO = 7
+ MIB_IPPROTO_RIP = 8
+ MIB_IPPROTO_IS_IS = 9
+ MIB_IPPROTO_ES_IS = 10
+ MIB_IPPROTO_CISCO = 11
+ MIB_IPPROTO_BBN = 12
+ MIB_IPPROTO_OSPF = 13
+ MIB_IPPROTO_BGP = 14
+ MIB_IPPROTO_IDPR = 15
+ MIB_IPPROTO_EIGRP = 16
+ MIB_IPPROTO_DVMRP = 17
+ MIB_IPPROTO_RPL = 18
+ MIB_IPPROTO_DHCP = 19
+ MIB_IPPROTO_NT_AUTOSTATIC = 10002
+ MIB_IPPROTO_NT_STATIC = 10006
+ MIB_IPPROTO_NT_STATIC_NON_DOD = 10007
+)
+
+// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2.
+type MibIpForwardRow2 struct {
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ DestinationPrefix IpAddressPrefix
+ NextHop RawSockaddrInet
+ SitePrefixLength uint8
+ ValidLifetime uint32
+ PreferredLifetime uint32
+ Metric uint32
+ Protocol uint32
+ Loopback uint8
+ AutoconfigureAddress uint8
+ Publish uint8
+ Immortal uint8
+ Age uint32
+ Origin uint32
+}
+
+// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2.
+type MibIpForwardTable2 struct {
+ NumEntries uint32
+ Table [1]MibIpForwardRow2
+}
+
+// Rows returns the IP route entries in the table.
+func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 {
+ return unsafe.Slice(&t.Table[0], t.NumEntries)
+}
+
// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
type MibUnicastIpAddressRow struct {
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 426151a0..f25b7308 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -182,13 +182,17 @@ var (
procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2")
+ procFreeMibTable = modiphlpapi.NewProc("FreeMibTable")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex")
+ procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2")
+ procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2")
procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+ procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2")
procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
@@ -1624,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
return
}
+func FreeMibTable(memory unsafe.Pointer) {
+ syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory))
+ return
+}
+
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
if r0 != 0 {
@@ -1664,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
return
}
+func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
@@ -1684,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa
return
}
+func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+ var _p0 uint32
+ if initialNotification {
+ _p0 = 1
+ }
+ r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
var _p0 uint32
if initialNotification {
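
[editor's note — not part of the diff] The iphlpapi additions above (GetIpForwardTable2, FreeMibTable, and MibIpForwardTable2.Rows) together allow walking the Windows routing table. A minimal sketch using only the API surface added in this diff:

```go
//go:build windows

// Sketch only: dump the IPv4/IPv6 routing table with the new wrappers.
// windows.AF_UNSPEC (0) requests routes for both address families.
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	var table *windows.MibIpForwardTable2
	if err := windows.GetIpForwardTable2(windows.AF_UNSPEC, &table); err != nil {
		fmt.Println("GetIpForwardTable2:", err)
		return
	}
	// The table is allocated by iphlpapi and must be released with FreeMibTable.
	defer windows.FreeMibTable(unsafe.Pointer(table))

	for _, row := range table.Rows() {
		fmt.Printf("if=%d metric=%d proto=%d family=%d\n",
			row.InterfaceIndex, row.Metric, row.Protocol,
			row.DestinationPrefix.Prefix.Family)
	}
}
```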
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index bddb2e2a..6ec537cd 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -160,7 +160,9 @@ const (
keyEnd
keyDeleteWord
keyDeleteLine
+ keyDelete
keyClearScreen
+ keyTranspose
keyPasteStart
keyPasteEnd
)
@@ -194,6 +196,8 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
return keyDeleteLine, b[1:]
case 12: // ^L
return keyClearScreen, b[1:]
+ case 20: // ^T
+ return keyTranspose, b[1:]
case 23: // ^W
return keyDeleteWord, b[1:]
case 14: // ^N
@@ -228,6 +232,10 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
}
}
+ if !pasteActive && len(b) >= 4 && b[0] == keyEscape && b[1] == '[' && b[2] == '3' && b[3] == '~' {
+ return keyDelete, b[4:]
+ }
+
if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
switch b[5] {
case 'C':
@@ -413,7 +421,7 @@ func (t *Terminal) eraseNPreviousChars(n int) {
}
}
-// countToLeftWord returns then number of characters from the cursor to the
+// countToLeftWord returns the number of characters from the cursor to the
// start of the previous word.
func (t *Terminal) countToLeftWord() int {
if t.pos == 0 {
@@ -438,7 +446,7 @@ func (t *Terminal) countToLeftWord() int {
return t.pos - pos
}
-// countToRightWord returns then number of characters from the cursor to the
+// countToRightWord returns the number of characters from the cursor to the
// start of the next word.
func (t *Terminal) countToRightWord() int {
pos := t.pos
@@ -478,7 +486,7 @@ func visualLength(runes []rune) int {
return length
}
-// histroryAt unlocks the terminal and relocks it while calling History.At.
+// historyAt unlocks the terminal and relocks it while calling History.At.
func (t *Terminal) historyAt(idx int) (string, bool) {
t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer.
defer t.lock.Lock() // panic in At (or Len) protection.
@@ -590,7 +598,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
}
t.line = t.line[:t.pos]
t.moveCursorToPos(t.pos)
- case keyCtrlD:
+ case keyCtrlD, keyDelete:
// Erase the character under the current position.
// The EOF case when the line is empty is handled in
// readLine().
@@ -600,6 +608,24 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
}
case keyCtrlU:
t.eraseNPreviousChars(t.pos)
+ case keyTranspose:
+ // This transposes the two characters around the cursor and advances the cursor. Best-effort.
+ if len(t.line) < 2 || t.pos < 1 {
+ return
+ }
+ swap := t.pos
+ if swap == len(t.line) {
+ swap-- // special: at end of line, swap previous two chars
+ }
+ t.line[swap-1], t.line[swap] = t.line[swap], t.line[swap-1]
+ if t.pos < len(t.line) {
+ t.pos++
+ }
+ if t.echo {
+ t.moveCursorToPos(swap - 1)
+ t.writeLine(t.line[swap-1:])
+ t.moveCursorToPos(t.pos)
+ }
case keyClearScreen:
// Erases the screen and moves the cursor to the home position.
t.queue([]rune("\x1b[2J\x1b[H"))
diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go
index dd99ad14..ce28c906 100644
--- a/vendor/golang.org/x/text/encoding/unicode/unicode.go
+++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go
@@ -60,9 +60,9 @@ func (utf8bomEncoding) NewDecoder() *encoding.Decoder {
}
var utf8enc = &internal.Encoding{
- &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
- "UTF-8",
- identifier.UTF8,
+ Encoding: &internal.SimpleEncoding{Decoder: utf8Decoder{}, Encoder: runes.ReplaceIllFormed()},
+ Name: "UTF-8",
+ MIB: identifier.UTF8,
}
type utf8bomDecoder struct {
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
index 9d2ae547..fb827323 100644
--- a/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -427,13 +427,6 @@ type isolatingRunSequence struct {
func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
-func maxLevel(a, b level) level {
- if a > b {
- return a
- }
- return b
-}
-
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
indexes: indexes,
types: types,
level: level,
- sos: typeForLevel(maxLevel(prevLevel, level)),
- eos: typeForLevel(maxLevel(succLevel, level)),
+ sos: typeForLevel(max(prevLevel, level)),
+ eos: typeForLevel(max(succLevel, level)),
}
}
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 1de0ce66..2079de7b 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -33,17 +33,21 @@ guidelines, there may be valid reasons to do so, but it should be rare.
## Guidelines for Pull Requests
-How to get your contributions merged smoothly and quickly:
+Please read the following carefully to ensure your contributions can be merged
+smoothly and quickly.
+
+### PR Contents
- Create **small PRs** that are narrowly focused on **addressing a single
concern**. We often receive PRs that attempt to fix several things at the same
time, and if one part of the PR has a problem, that will hold up the entire
PR.
-- For **speculative changes**, consider opening an issue and discussing it
- first. If you are suggesting a behavioral or API change, consider starting
- with a [gRFC proposal](https://github.com/grpc/proposal). Many new features
- that are not bug fixes will require cross-language agreement.
+- If your change does not address an **open issue** with an **agreed
+ resolution**, consider opening an issue and discussing it first. If you are
+ suggesting a behavioral or API change, consider starting with a [gRFC
+ proposal](https://github.com/grpc/proposal). Many new features that are not
+ bug fixes will require cross-language agreement.
- If you want to fix **formatting or style**, consider whether your changes are
an obvious improvement or might be considered a personal preference. If a
@@ -56,16 +60,6 @@ How to get your contributions merged smoothly and quickly:
often written as "iff". Please do not make spelling correction changes unless
you are certain they are misspellings.
-- Provide a good **PR description** as a record of **what** change is being made
- and **why** it was made. Link to a GitHub issue if it exists.
-
-- Maintain a **clean commit history** and use **meaningful commit messages**.
- PRs with messy commit histories are difficult to review and won't be merged.
- Before sending your PR, ensure your changes are based on top of the latest
- `upstream/master` commits, and avoid rebasing in the middle of a code review.
- You should **never use `git push -f`** unless absolutely necessary during a
- review, as it can interfere with GitHub's tracking of comments.
-
- **All tests need to be passing** before your change can be merged. We
recommend you run tests locally before creating your PR to catch breakages
early on:
@@ -81,15 +75,80 @@ How to get your contributions merged smoothly and quickly:
GitHub, which will trigger a GitHub Actions run that you can use to verify
everything is passing.
-- If you are adding a new file, make sure it has the **copyright message**
+- Note that there are two GitHub actions checks that need not be green:
+
+ 1. We test the freshness of the generated proto code we maintain via the
+ `vet-proto` check. If the source proto files are updated, but our repo is
+ not updated, an optional checker will fail. This will be fixed by our team
+ in a separate PR and will not prevent the merge of your PR.
+
+ 2. We run a checker that will fail if there is any change in dependencies of
+ an exported package via the `dependencies` check. If new dependencies are
+ added that are not appropriate, we may not accept your PR (see below).
+
+- If you are adding a **new file**, make sure it has the **copyright message**
template at the top as a comment. You can copy the message from an existing
file and update the year.
- The grpc package should only depend on standard Go packages and a small number
of exceptions. **If your contribution introduces new dependencies**, you will
- need a discussion with gRPC-Go maintainers. A GitHub action check will run on
- every PR, and will flag any transitive dependency changes from any public
- package.
+ need a discussion with gRPC-Go maintainers.
+
+### PR Descriptions
+
+- **PR titles** should start with the name of the component being addressed, or
+ the type of change. Examples: transport, client, server, round_robin, xds,
+ cleanup, deps.
+
+- Read and follow the **guidelines for PR titles and descriptions** here:
+ https://google.github.io/eng-practices/review/developer/cl-descriptions.html
+
+ *particularly* the sections "First Line" and "Body is Informative".
+
+ Note: your PR description will be used as the git commit message in a
+ squash-and-merge if your PR is approved. We may make changes to this as
+ necessary.
+
+- **Does this PR relate to an open issue?** On the first line, please use the
+ tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or
+ use `Updates #<issue>` if the PR is related to an open issue, but does not
+ fix it. Consider filing an issue if one does not already exist.
+
+- PR descriptions *must* conclude with **release notes** as follows:
+
+ ```
+ RELEASE NOTES:
+ * <component>: <summary of change>
+ ```
+
+ This need not match the PR title.
+
+ The summary must:
+
+ * be something that gRPC users will understand.
+
+ * clearly explain the feature being added, the issue being fixed, or the
+ behavior being changed, etc. If fixing a bug, be clear about how the bug
+ can be triggered by an end-user.
+
+ * begin with a capital letter and use complete sentences.
+
+ * be as short as possible to describe the change being made.
+
+ If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or
+ GitHub-related, use `RELEASE NOTES: n/a`.
+
+### PR Process
+
+- Please **self-review** your code changes before sending your PR. This will
+ prevent simple, obvious errors from causing delays.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**.
+ PRs with messy commit histories are difficult to review and won't be merged.
+ Before sending your PR, ensure your changes are based on top of the latest
+ `upstream/master` commits, and avoid rebasing in the middle of a code review.
+ You should **never use `git push -f`** unless absolutely necessary during a
+ review, as it can interfere with GitHub's tracking of comments.
- Unless your PR is trivial, you should **expect reviewer comments** that you
will need to address before merging. We'll label the PR as `Status: Requires
@@ -98,5 +157,3 @@ How to get your contributions merged smoothly and quickly:
`stale`, and we will automatically close it after 7 days if we don't hear back
from you. Please feel free to ping issues or bugs if you do not get a response
within a week.
-
-- Exceptions to the rules can be made if there's a compelling reason to do so.
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index ea889981..b4bc3a2b 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -16,55 +16,124 @@
*
*/
-// Package pickfirst contains the pick_first load balancing policy.
+// Package pickfirst contains the pick_first load balancing policy which
+// is the universal leaf policy.
package pickfirst
import (
"encoding/json"
"errors"
"fmt"
- rand "math/rand/v2"
+ "net"
+ "net/netip"
+ "sync"
+ "time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
+ expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
-
- _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
)
func init() {
- if envconfig.NewPickFirstEnabled {
- return
- }
balancer.Register(pickfirstBuilder{})
}
-var logger = grpclog.Component("pick-first-lb")
+// Name is the name of the pick_first balancer.
+const Name = "pick_first"
+
+// enableHealthListenerKeyType is a unique key type used in resolver
+// attributes to indicate whether the health listener usage is enabled.
+type enableHealthListenerKeyType struct{}
+
+var (
+ logger = grpclog.Component("pick-first-leaf-lb")
+ disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.disconnections",
+ Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+ Unit: "{disconnection}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_succeeded",
+ Description: "EXPERIMENTAL. Number of successful connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_failed",
+ Description: "EXPERIMENTAL. Number of failed connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+)
const (
- // Name is the name of the pick_first balancer.
- Name = "pick_first"
- logPrefix = "[pick-first-lb %p] "
+ // TODO: change to pick-first when this becomes the default pick_first policy.
+ logPrefix = "[pick-first-leaf-lb %p] "
+ // connectionDelayInterval is the time to wait for during the happy eyeballs
+ // pass before starting the next connection attempt.
+ connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+ // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+ // address.
+ ipAddrFamilyUnknown ipAddrFamily = iota
+ ipAddrFamilyV4
+ ipAddrFamilyV6
)
type pickfirstBuilder struct{}
-func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{cc: cc}
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{
+ cc: cc,
+ target: bo.Target.String(),
+ metricsRecorder: cc.MetricsRecorder(),
+
+ subConns: resolver.NewAddressMapV2[*scData](),
+ state: connectivity.Connecting,
+ cancelConnectionTimer: func() {},
+ }
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}
-func (pickfirstBuilder) Name() string {
+func (b pickfirstBuilder) Name() string {
return Name
}
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func EnableHealthListener(state resolver.State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+ return state
+}
+
type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
@@ -74,90 +143,129 @@ type pfConfig struct {
ShuffleAddressList bool `json:"shuffleAddressList"`
}
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+ // The following fields are initialized at build time and read-only after
+ // that.
+ subConn balancer.SubConn
+ addr resolver.Address
+
+ rawConnectivityState connectivity.State
+ // The effective connectivity state based on raw connectivity, health state
+ // and after following sticky TransientFailure behaviour defined in A62.
+ effectiveState connectivity.State
+ lastErr error
+ connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ sd := &scData{
+ rawConnectivityState: connectivity.Idle,
+ effectiveState: connectivity.Idle,
+ addr: addr,
}
- return cfg, nil
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(sd, state)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ sd.subConn = sc
+ return sd, nil
}
type pickfirstBalancer struct {
- logger *internalgrpclog.PrefixLogger
- state connectivity.State
- cc balancer.ClientConn
- subConn balancer.SubConn
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ logger *internalgrpclog.PrefixLogger
+ cc balancer.ClientConn
+ target string
+ metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
+
+ // The mutex is used to ensure synchronization of updates triggered
+ // from the idle picker and the already serialized resolver,
+ // SubConn state updates.
+ mu sync.Mutex
+ // State reported to the channel based on SubConn states and resolver
+ // updates.
+ state connectivity.State
+ // scData for active subonns mapped by address.
+ subConns *resolver.AddressMapV2[*scData]
+ addressList addressList
+ firstPass bool
+ numTF int
+ cancelConnectionTimer func()
+ healthCheckingEnabled bool
}
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determined the resolver update to be invalid.
func (b *pickfirstBalancer) ResolverError(err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
if b.logger.V(2) {
b.logger.Infof("Received error from the name resolver: %v", err)
}
- if b.subConn == nil {
- b.state = connectivity.TransientFailure
- }
- if b.state != connectivity.TransientFailure {
- // The picker will not change since the balancer does not currently
- // report an error.
+ // The picker will not change since the balancer does not currently
+ // report an error. If the balancer hasn't received a single good resolver
+ // update yet, transition to TRANSIENT_FAILURE.
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+ }
return
}
- b.cc.UpdateState(balancer.State{
+
+ b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
}
-// Shuffler is an interface for shuffling an address list.
-type Shuffler interface {
- ShuffleAddressListForTesting(n int, swap func(i, j int))
-}
-
-// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
-// is the number of elements. swap swaps the elements with indexes i and j.
-func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
-
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.cancelConnectionTimer()
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // The resolver reported an empty address list. Treat it like an error by
- // calling b.ResolverError.
- if b.subConn != nil {
- // Shut down the old subConn. All addresses were removed, so it is
- // no longer valid.
- b.subConn.Shutdown()
- b.subConn = nil
- }
- b.ResolverError(errors.New("produced zero addresses"))
+ // Cleanup state pertaining to the previous resolver state.
+ // Treat an empty address list like an error by calling b.ResolverError.
+ b.closeSubConnsLocked()
+ b.addressList.updateAddrs(nil)
+ b.resolverErrorLocked(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
- // We don't have to guard this block with the env var because ParseConfig
- // already does so.
+ b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
}
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
- var addrs []resolver.Address
+ var newAddrs []resolver.Address
if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling will
- // change the order of endpoints but not touch the order of the addresses
- // within each endpoint. - A61
+ // Perform the optional shuffling described in gRFC A62. The shuffling
+ // will change the order of endpoints but not touch the order of the
+ // addresses within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
- // "Flatten the list by concatenating the ordered list of addresses for each
- // of the endpoints, in order." - A61
+ // "Flatten the list by concatenating the ordered list of addresses for
+ // each of the endpoints, in order." - A61
for _, endpoint := range endpoints {
- // "In the flattened list, interleave addresses from the two address
- // families, as per RFC-8304 section 4." - A61
- // TODO: support the above language.
- addrs = append(addrs, endpoint.Addresses...)
+ newAddrs = append(newAddrs, endpoint.Addresses...)
}
} else {
// Endpoints not set, process addresses until we migrate resolver
@@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
- addrs = state.ResolverState.Addresses
+ newAddrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
- addrs = append([]resolver.Address{}, addrs...)
- rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ newAddrs = append([]resolver.Address{}, newAddrs...)
+ internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
}
}
- if b.subConn != nil {
- b.cc.UpdateAddresses(b.subConn, addrs)
+ // If an address appears in multiple endpoints or in the same endpoint
+ // multiple times, we keep it only once. We will create only one SubConn
+ // for the address because an AddressMap is used to store SubConns.
+ // Not de-duplicating would result in attempting to connect to the same
+ // SubConn multiple times in the same pass. We don't want this.
+ newAddrs = deDupAddresses(newAddrs)
+ newAddrs = interleaveAddresses(newAddrs)
+
+ prevAddr := b.addressList.currentAddress()
+ prevSCData, found := b.subConns.Get(prevAddr)
+ prevAddrsCount := b.addressList.size()
+ isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
+ b.addressList.updateAddrs(newAddrs)
+
+ // If the previous ready SubConn exists in new address list,
+ // keep this connection and don't create new SubConns.
+ if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
return nil
}
- var subConn balancer.SubConn
- subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(subConn, state)
- },
- })
- if err != nil {
- if b.logger.V(2) {
- b.logger.Infof("Failed to create new SubConn: %v", err)
- }
- b.state = connectivity.TransientFailure
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+ b.reconcileSubConnsLocked(newAddrs)
+ // If it's the first resolver update or the balancer was already READY
+ // (but the new address list does not contain the ready SubConn) or
+ // CONNECTING, enter CONNECTING.
+ // We may be in TRANSIENT_FAILURE due to a previous empty address list,
+ // we should still enter CONNECTING because the sticky TF behaviour
+ // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+ // due to connectivity failures.
+ if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+ // Start connection attempt at first address.
+ b.forceUpdateConcludedStateLocked(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
- return balancer.ErrBadResolverState
+ b.startFirstPassLocked()
+ } else if b.state == connectivity.TransientFailure {
+ // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+ // we're READY. See A62.
+ b.startFirstPassLocked()
}
- b.subConn = subConn
- b.state = connectivity.Idle
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.subConn.Connect()
return nil
}
@@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
}
-func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- if b.logger.V(2) {
- b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
+func (b *pickfirstBalancer) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.closeSubConnsLocked()
+ b.cancelConnectionTimer()
+ b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.state == connectivity.Idle {
+ // Move the balancer into CONNECTING state immediately. This is done to
+ // avoid staying in IDLE if a resolver update arrives before the first
+ // SubConn reports CONNECTING.
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.startFirstPassLocked()
+ }
+}
+
+func (b *pickfirstBalancer) startFirstPassLocked() {
+ b.firstPass = true
+ b.numTF = 0
+ // Reset the connection attempt record for existing SubConns.
+ for _, sd := range b.subConns.Values() {
+ sd.connectionFailedInFirstPass = false
+ }
+ b.requestConnectionLocked()
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+ for _, sd := range b.subConns.Values() {
+ sd.subConn.Shutdown()
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+ seenAddrs := resolver.NewAddressMapV2[bool]()
+ retAddrs := []resolver.Address{}
+
+ for _, addr := range addrs {
+ if _, ok := seenAddrs.Get(addr); ok {
+ continue
+ }
+ seenAddrs.Set(addr, true)
+ retAddrs = append(retAddrs, addr)
+ }
+ return retAddrs
+}
+
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+ familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+ interleavingOrder := []ipAddrFamily{}
+ for _, addr := range addrs {
+ family := addressFamily(addr.Addr)
+ if _, found := familyAddrsMap[family]; !found {
+ interleavingOrder = append(interleavingOrder, family)
+ }
+ familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+ }
+
+ interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+ for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+ // Some IP types may have fewer addresses than others, so we look for
+ // the next type that has a remaining member to add to the interleaved
+ // list.
+ family := interleavingOrder[curFamilyIdx]
+ remainingMembers := familyAddrsMap[family]
+ if len(remainingMembers) > 0 {
+ interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+ familyAddrsMap[family] = remainingMembers[1:]
+ }
+ }
+
+ return interleavedAddrs
+}
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+ // Parse the IP after removing the port.
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ switch {
+ case ip.Is4() || ip.Is4In6():
+ return ipAddrFamilyV4
+ case ip.Is6():
+ return ipAddrFamilyV6
+ default:
+ return ipAddrFamilyUnknown
+ }
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+// that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+// from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+ newAddrsMap := resolver.NewAddressMapV2[bool]()
+ for _, addr := range newAddrs {
+ newAddrsMap.Set(addr, true)
+ }
+
+ for _, oldAddr := range b.subConns.Keys() {
+ if _, ok := newAddrsMap.Get(oldAddr); ok {
+ continue
+ }
+ val, _ := b.subConns.Get(oldAddr)
+ val.subConn.Shutdown()
+ b.subConns.Delete(oldAddr)
+ }
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConn must be shutdown.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+ b.cancelConnectionTimer()
+ for _, sd := range b.subConns.Values() {
+ if sd.subConn != selected.subConn {
+ sd.subConn.Shutdown()
+ }
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+ b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+ if !b.addressList.isValid() {
+ return
+ }
+ var lastErr error
+ for valid := true; valid; valid = b.addressList.increment() {
+ curAddr := b.addressList.currentAddress()
+ sd, ok := b.subConns.Get(curAddr)
+ if !ok {
+ var err error
+ // We want to assign the new scData to sd from the outer scope,
+ // hence we can't use := below.
+ sd, err = b.newSCData(curAddr)
+ if err != nil {
+ // This should never happen, unless the clientConn is being shut
+ // down.
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+ }
+ // Do nothing, the LB policy will be closed soon.
+ return
+ }
+ b.subConns.Set(curAddr, sd)
+ }
+
+ switch sd.rawConnectivityState {
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ b.scheduleNextConnectionLocked()
+ return
+ case connectivity.TransientFailure:
+ // The SubConn is being re-used and failed during a previous pass
+ // over the addressList. It has not completed backoff yet.
+ // Mark it as having failed and try the next address.
+ sd.connectionFailedInFirstPass = true
+ lastErr = sd.lastErr
+ continue
+ case connectivity.Connecting:
+ // Wait for the connection attempt to complete or the timer to fire
+ // before attempting the next address.
+ b.scheduleNextConnectionLocked()
+ return
+ default:
+ b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
+ return
+
+ }
+ }
+
+ // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+ // first pass if possible.
+ b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+ b.cancelConnectionTimer()
+ if !b.addressList.hasNext() {
+ return
}
- if b.subConn != subConn {
+ curAddr := b.addressList.currentAddress()
+ cancelled := false // Access to this is protected by the balancer's mutex.
+ closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // If the scheduled task is cancelled while acquiring the mutex, return.
+ if cancelled {
+ return
+ }
if b.logger.V(2) {
- b.logger.Infof("Ignored state change because subConn is not recognized")
+ b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+ }
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
}
+ })
+ // Access to the cancellation callback held by the balancer is guarded by
+ // the balancer's mutex, so it's safe to set the boolean from the callback.
+ b.cancelConnectionTimer = sync.OnceFunc(func() {
+ cancelled = true
+ closeFn()
+ })
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ oldState := sd.rawConnectivityState
+ sd.rawConnectivityState = newState.ConnectivityState
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes this
+ // SubConn.
+ if !b.isActiveSCData(sd) {
return
}
- if state.ConnectivityState == connectivity.Shutdown {
- b.subConn = nil
+ if newState.ConnectivityState == connectivity.Shutdown {
+ sd.effectiveState = connectivity.Shutdown
return
}
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
- })
- case connectivity.Connecting:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. See A62.
+ // Record a connection attempt when exiting CONNECTING.
+ if newState.ConnectivityState == connectivity.TransientFailure {
+ sd.connectionFailedInFirstPass = true
+ connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+
+ if newState.ConnectivityState == connectivity.Ready {
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ b.shutdownRemainingLocked(sd)
+ if !b.addressList.seekTo(sd.addr) {
+ // This should not fail as we should have only one SubConn after
+ // entering READY. The SubConn should be present in the addressList.
+ b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
return
}
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
+ if !b.healthCheckingEnabled {
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+ }
+
+ sd.effectiveState = connectivity.Ready
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ return
+ }
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+ }
+ // Send a CONNECTING update to take the SubConn out of sticky-TF if
+ // required.
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
+ sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+ b.updateSubConnHealthState(sd, scs)
+ })
+ return
+ }
+
+ // If the LB policy is READY, and it receives a subchannel state change,
+ // it means that the READY subchannel has failed.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+ // part of the if condition below once the issue is fixed.
+ if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+ // Once a transport fails, the balancer enters IDLE and starts from
+ // the first address when the picker is used.
+ b.shutdownRemainingLocked(sd)
+ sd.effectiveState = newState.ConnectivityState
+ // The SubConn was effectively READY in between CONNECTING and IDLE, but
+ // the READY notification was missed; account for that.
+ if oldState == connectivity.Connecting {
+ // A known issue (https://github.com/grpc/grpc-go/issues/7862)
+ // causes a race that prevents the READY state change notification.
+ // This works around it.
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+ disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
+ b.addressList.reset()
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+ })
+ return
+ }
+
+ if b.firstPass {
+ switch newState.ConnectivityState {
+ case connectivity.Connecting:
+ // The effective state can be in either IDLE, CONNECTING or
+ // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
+ // TRANSIENT_FAILURE until it's READY. See A62.
+ if sd.effectiveState != connectivity.TransientFailure {
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ }
+ case connectivity.TransientFailure:
+ sd.lastErr = newState.ConnectionError
+ sd.effectiveState = connectivity.TransientFailure
+ // Since we're re-using common SubConns while handling resolver
+ // updates, we could receive an out of turn TRANSIENT_FAILURE from
+ // a pass over the previous address list. Happy Eyeballs will also
+ // cause out of order updates to arrive.
+
+ if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+ b.cancelConnectionTimer()
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ return
+ }
+ }
+
+ // End the first pass if we've seen a TRANSIENT_FAILURE from all
+ // SubConns once.
+ b.endFirstPassIfPossibleLocked(newState.ConnectionError)
+ }
+ return
+ }
+
+ // We have finished the first pass, keep re-connecting failing SubConns.
+ switch newState.ConnectivityState {
+ case connectivity.TransientFailure:
+ b.numTF = (b.numTF + 1) % b.subConns.Len()
+ sd.lastErr = newState.ConnectionError
+ if b.numTF%b.subConns.Len() == 0 {
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: newState.ConnectionError},
+ })
+ }
+ // We don't need to request re-resolution since the SubConn already
+ // does that before reporting TRANSIENT_FAILURE.
+ // TODO: #7534 - Move re-resolution requests from SubConn into
+ // pick_first.
case connectivity.Idle:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. Also kick the
- // subConn out of Idle into Connecting. See A62.
- b.subConn.Connect()
+ sd.subConn.Connect()
+ }
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses are tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+ // An optimization to avoid iterating over the entire SubConn map.
+ if b.addressList.isValid() {
+ return
+ }
+ // Connect() has been called on all the SubConns. The first pass can be
+ // ended if all the SubConns have reported a failure.
+ for _, sd := range b.subConns.Values() {
+ if !sd.connectionFailedInFirstPass {
return
}
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &idlePicker{subConn: subConn},
+ }
+ b.firstPass = false
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: lastErr},
+ })
+ // Start re-connecting all the SubConns that are already in IDLE.
+ for _, sd := range b.subConns.Values() {
+ if sd.rawConnectivityState == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+ activeSD, found := b.subConns.Get(sd.addr)
+ return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // Previously relevant SubConns can still call back with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes
+ // this SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ sd.effectiveState = state.ConnectivityState
+ switch state.ConnectivityState {
+ case connectivity.Ready:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
})
case connectivity.TransientFailure:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{err: state.ConnectionError},
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+ })
+ case connectivity.Connecting:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
+ default:
+ b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
}
- b.state = state.ConnectivityState
}
-func (b *pickfirstBalancer) Close() {
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+ // In case of TransientFailure, allow the picker to be updated so that the
+ // connectivity error is refreshed; in all other cases don't send duplicate
+ // state updates.
+ if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+ return
+ }
+ b.forceUpdateConcludedStateLocked(newState)
}
-func (b *pickfirstBalancer) ExitIdle() {
- if b.subConn != nil && b.state == connectivity.Idle {
- b.subConn.Connect()
- }
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force-update the ClientConn state since
+// the channel does not assume that LB policies start in CONNECTING and instead
+// relies on the LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+ b.state = newState.ConnectivityState
+ b.cc.UpdateState(newState)
}
type picker struct {
@@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
// CONNECTING when Pick is called.
type idlePicker struct {
- subConn balancer.SubConn
+ exitIdle func()
}
func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.subConn.Connect()
+ i.exitIdle()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found, in which case the current
+// index is left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
+
+// hasNext returns true if incrementing the addressList will not move it past
+// the end of the list. It returns false if the list has already moved past
+// the end.
+func (al *addressList) hasNext() bool {
+ if !al.isValid() {
+ return false
+ }
+ return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes)
+}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
deleted file mode 100644
index 67f315a0..00000000
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ /dev/null
@@ -1,906 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package pickfirstleaf contains the pick_first load balancing policy which
-// will be the universal leaf policy after dualstack changes are implemented.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package pickfirstleaf
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net"
- "net/netip"
- "sync"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/pickfirst/internal"
- "google.golang.org/grpc/connectivity"
- expstats "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
- internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-func init() {
- if envconfig.NewPickFirstEnabled {
- // Register as the default pick_first balancer.
- Name = "pick_first"
- }
- balancer.Register(pickfirstBuilder{})
-}
-
-// enableHealthListenerKeyType is a unique key type used in resolver
-// attributes to indicate whether the health listener usage is enabled.
-type enableHealthListenerKeyType struct{}
-
-var (
- logger = grpclog.Component("pick-first-leaf-lb")
- // Name is the name of the pick_first_leaf balancer.
- // It is changed to "pick_first" in init() if this balancer is to be
- // registered as the default pickfirst.
- Name = "pick_first_leaf"
- disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.disconnections",
- Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
- Unit: "{disconnection}",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_succeeded",
- Description: "EXPERIMENTAL. Number of successful connection attempts.",
- Unit: "{attempt}",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_failed",
- Description: "EXPERIMENTAL. Number of failed connection attempts.",
- Unit: "{attempt}",
- Labels: []string{"grpc.target"},
- Default: false,
- })
-)
-
-const (
- // TODO: change to pick-first when this becomes the default pick_first policy.
- logPrefix = "[pick-first-leaf-lb %p] "
- // connectionDelayInterval is the time to wait for during the happy eyeballs
- // pass before starting the next connection attempt.
- connectionDelayInterval = 250 * time.Millisecond
-)
-
-type ipAddrFamily int
-
-const (
- // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
- // address.
- ipAddrFamilyUnknown ipAddrFamily = iota
- ipAddrFamilyV4
- ipAddrFamilyV6
-)
-
-type pickfirstBuilder struct{}
-
-func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{
- cc: cc,
- target: bo.Target.String(),
- metricsRecorder: cc.MetricsRecorder(),
-
- subConns: resolver.NewAddressMapV2[*scData](),
- state: connectivity.Connecting,
- cancelConnectionTimer: func() {},
- }
- b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
- return b
-}
-
-func (b pickfirstBuilder) Name() string {
- return Name
-}
-
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
- }
- return cfg, nil
-}
-
-// EnableHealthListener updates the state to configure pickfirst for using a
-// generic health listener.
-func EnableHealthListener(state resolver.State) resolver.State {
- state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
- return state
-}
-
-type pfConfig struct {
- serviceconfig.LoadBalancingConfig `json:"-"`
-
- // If set to true, instructs the LB policy to shuffle the order of the list
- // of endpoints received from the name resolver before attempting to
- // connect to them.
- ShuffleAddressList bool `json:"shuffleAddressList"`
-}
-
-// scData keeps track of the current state of the subConn.
-// It is not safe for concurrent access.
-type scData struct {
- // The following fields are initialized at build time and read-only after
- // that.
- subConn balancer.SubConn
- addr resolver.Address
-
- rawConnectivityState connectivity.State
- // The effective connectivity state based on raw connectivity, health state
- // and after following sticky TransientFailure behaviour defined in A62.
- effectiveState connectivity.State
- lastErr error
- connectionFailedInFirstPass bool
-}
-
-func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
- sd := &scData{
- rawConnectivityState: connectivity.Idle,
- effectiveState: connectivity.Idle,
- addr: addr,
- }
- sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(sd, state)
- },
- })
- if err != nil {
- return nil, err
- }
- sd.subConn = sc
- return sd, nil
-}
-
-type pickfirstBalancer struct {
- // The following fields are initialized at build time and read-only after
- // that and therefore do not need to be guarded by a mutex.
- logger *internalgrpclog.PrefixLogger
- cc balancer.ClientConn
- target string
- metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
-
- // The mutex is used to ensure synchronization of updates triggered
- // from the idle picker and the already serialized resolver,
- // SubConn state updates.
- mu sync.Mutex
- // State reported to the channel based on SubConn states and resolver
- // updates.
- state connectivity.State
- // scData for active subonns mapped by address.
- subConns *resolver.AddressMapV2[*scData]
- addressList addressList
- firstPass bool
- numTF int
- cancelConnectionTimer func()
- healthCheckingEnabled bool
-}
-
-// ResolverError is called by the ClientConn when the name resolver produces
-// an error or when pickfirst determined the resolver update to be invalid.
-func (b *pickfirstBalancer) ResolverError(err error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.resolverErrorLocked(err)
-}
-
-func (b *pickfirstBalancer) resolverErrorLocked(err error) {
- if b.logger.V(2) {
- b.logger.Infof("Received error from the name resolver: %v", err)
- }
-
- // The picker will not change since the balancer does not currently
- // report an error. If the balancer hasn't received a single good resolver
- // update yet, transition to TRANSIENT_FAILURE.
- if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
- if b.logger.V(2) {
- b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
- }
- return
- }
-
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
- })
-}
-
-func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.cancelConnectionTimer()
- if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // Cleanup state pertaining to the previous resolver state.
- // Treat an empty address list like an error by calling b.ResolverError.
- b.closeSubConnsLocked()
- b.addressList.updateAddrs(nil)
- b.resolverErrorLocked(errors.New("produced zero addresses"))
- return balancer.ErrBadResolverState
- }
- b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
- cfg, ok := state.BalancerConfig.(pfConfig)
- if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
- }
-
- if b.logger.V(2) {
- b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
- }
-
- var newAddrs []resolver.Address
- if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling
- // will change the order of endpoints but not touch the order of the
- // addresses within each endpoint. - A61
- if cfg.ShuffleAddressList {
- endpoints = append([]resolver.Endpoint{}, endpoints...)
- internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
- }
-
- // "Flatten the list by concatenating the ordered list of addresses for
- // each of the endpoints, in order." - A61
- for _, endpoint := range endpoints {
- newAddrs = append(newAddrs, endpoint.Addresses...)
- }
- } else {
- // Endpoints not set, process addresses until we migrate resolver
- // emissions fully to Endpoints. The top channel does wrap emitted
- // addresses with endpoints, however some balancers such as weighted
- // target do not forward the corresponding correct endpoints down/split
- // endpoints properly. Once all balancers correctly forward endpoints
- // down, can delete this else conditional.
- newAddrs = state.ResolverState.Addresses
- if cfg.ShuffleAddressList {
- newAddrs = append([]resolver.Address{}, newAddrs...)
- internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
- }
- }
-
- // If an address appears in multiple endpoints or in the same endpoint
- // multiple times, we keep it only once. We will create only one SubConn
- // for the address because an AddressMap is used to store SubConns.
- // Not de-duplicating would result in attempting to connect to the same
- // SubConn multiple times in the same pass. We don't want this.
- newAddrs = deDupAddresses(newAddrs)
- newAddrs = interleaveAddresses(newAddrs)
-
- prevAddr := b.addressList.currentAddress()
- prevSCData, found := b.subConns.Get(prevAddr)
- prevAddrsCount := b.addressList.size()
- isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
- b.addressList.updateAddrs(newAddrs)
-
- // If the previous ready SubConn exists in new address list,
- // keep this connection and don't create new SubConns.
- if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
- return nil
- }
-
- b.reconcileSubConnsLocked(newAddrs)
- // If it's the first resolver update or the balancer was already READY
- // (but the new address list does not contain the ready SubConn) or
- // CONNECTING, enter CONNECTING.
- // We may be in TRANSIENT_FAILURE due to a previous empty address list,
- // we should still enter CONNECTING because the sticky TF behaviour
- // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
- // due to connectivity failures.
- if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
- // Start connection attempt at first address.
- b.forceUpdateConcludedStateLocked(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.startFirstPassLocked()
- } else if b.state == connectivity.TransientFailure {
- // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
- // we're READY. See A62.
- b.startFirstPassLocked()
- }
- return nil
-}
-
-// UpdateSubConnState is unused as a StateListener is always registered when
-// creating SubConns.
-func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
-}
-
-func (b *pickfirstBalancer) Close() {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.closeSubConnsLocked()
- b.cancelConnectionTimer()
- b.state = connectivity.Shutdown
-}
-
-// ExitIdle moves the balancer out of idle state. It can be called concurrently
-// by the idlePicker and clientConn so access to variables should be
-// synchronized.
-func (b *pickfirstBalancer) ExitIdle() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.state == connectivity.Idle {
- b.startFirstPassLocked()
- }
-}
-
-func (b *pickfirstBalancer) startFirstPassLocked() {
- b.firstPass = true
- b.numTF = 0
- // Reset the connection attempt record for existing SubConns.
- for _, sd := range b.subConns.Values() {
- sd.connectionFailedInFirstPass = false
- }
- b.requestConnectionLocked()
-}
-
-func (b *pickfirstBalancer) closeSubConnsLocked() {
- for _, sd := range b.subConns.Values() {
- sd.subConn.Shutdown()
- }
- b.subConns = resolver.NewAddressMapV2[*scData]()
-}
-
-// deDupAddresses ensures that each address appears only once in the slice.
-func deDupAddresses(addrs []resolver.Address) []resolver.Address {
- seenAddrs := resolver.NewAddressMapV2[*scData]()
- retAddrs := []resolver.Address{}
-
- for _, addr := range addrs {
- if _, ok := seenAddrs.Get(addr); ok {
- continue
- }
- retAddrs = append(retAddrs, addr)
- }
- return retAddrs
-}
-
-// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
-// as per RFC-8305 section 4.
-// Whichever address family is first in the list is followed by an address of
-// the other address family; that is, if the first address in the list is IPv6,
-// then the first IPv4 address should be moved up in the list to be second in
-// the list. It doesn't support configuring "First Address Family Count", i.e.
-// there will always be a single member of the first address family at the
-// beginning of the interleaved list.
-// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
-// "unknown" family for interleaving.
-// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
-func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
- familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
- interleavingOrder := []ipAddrFamily{}
- for _, addr := range addrs {
- family := addressFamily(addr.Addr)
- if _, found := familyAddrsMap[family]; !found {
- interleavingOrder = append(interleavingOrder, family)
- }
- familyAddrsMap[family] = append(familyAddrsMap[family], addr)
- }
-
- interleavedAddrs := make([]resolver.Address, 0, len(addrs))
-
- for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
- // Some IP types may have fewer addresses than others, so we look for
- // the next type that has a remaining member to add to the interleaved
- // list.
- family := interleavingOrder[curFamilyIdx]
- remainingMembers := familyAddrsMap[family]
- if len(remainingMembers) > 0 {
- interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
- familyAddrsMap[family] = remainingMembers[1:]
- }
- }
-
- return interleavedAddrs
-}
-
-// addressFamily returns the ipAddrFamily after parsing the address string.
-// If the address isn't of the format "ip-address:port", it returns
-// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
-// using a resolver like passthrough where the address may be a hostname in
-// some format that the dialer can resolve.
-func addressFamily(address string) ipAddrFamily {
- // Parse the IP after removing the port.
- host, _, err := net.SplitHostPort(address)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- ip, err := netip.ParseAddr(host)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- switch {
- case ip.Is4() || ip.Is4In6():
- return ipAddrFamilyV4
- case ip.Is6():
- return ipAddrFamilyV6
- default:
- return ipAddrFamilyUnknown
- }
-}
-
-// reconcileSubConnsLocked updates the active subchannels based on a new address
-// list from the resolver. It does this by:
-// - closing subchannels: any existing subchannels associated with addresses
-// that are no longer in the updated list are shut down.
-// - removing subchannels: entries for these closed subchannels are removed
-// from the subchannel map.
-//
-// This ensures that the subchannel map accurately reflects the current set of
-// addresses received from the name resolver.
-func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
- newAddrsMap := resolver.NewAddressMapV2[bool]()
- for _, addr := range newAddrs {
- newAddrsMap.Set(addr, true)
- }
-
- for _, oldAddr := range b.subConns.Keys() {
- if _, ok := newAddrsMap.Get(oldAddr); ok {
- continue
- }
- val, _ := b.subConns.Get(oldAddr)
- val.subConn.Shutdown()
- b.subConns.Delete(oldAddr)
- }
-}
-
-// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
-// becomes ready, which means that all other subConn must be shutdown.
-func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
- b.cancelConnectionTimer()
- for _, sd := range b.subConns.Values() {
- if sd.subConn != selected.subConn {
- sd.subConn.Shutdown()
- }
- }
- b.subConns = resolver.NewAddressMapV2[*scData]()
- b.subConns.Set(selected.addr, selected)
-}
-
-// requestConnectionLocked starts connecting on the subchannel corresponding to
-// the current address. If no subchannel exists, one is created. If the current
-// subchannel is in TransientFailure, a connection to the next address is
-// attempted until a subchannel is found.
-func (b *pickfirstBalancer) requestConnectionLocked() {
- if !b.addressList.isValid() {
- return
- }
- var lastErr error
- for valid := true; valid; valid = b.addressList.increment() {
- curAddr := b.addressList.currentAddress()
- sd, ok := b.subConns.Get(curAddr)
- if !ok {
- var err error
- // We want to assign the new scData to sd from the outer scope,
- // hence we can't use := below.
- sd, err = b.newSCData(curAddr)
- if err != nil {
- // This should never happen, unless the clientConn is being shut
- // down.
- if b.logger.V(2) {
- b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
- }
- // Do nothing, the LB policy will be closed soon.
- return
- }
- b.subConns.Set(curAddr, sd)
- }
-
- switch sd.rawConnectivityState {
- case connectivity.Idle:
- sd.subConn.Connect()
- b.scheduleNextConnectionLocked()
- return
- case connectivity.TransientFailure:
- // The SubConn is being re-used and failed during a previous pass
- // over the addressList. It has not completed backoff yet.
- // Mark it as having failed and try the next address.
- sd.connectionFailedInFirstPass = true
- lastErr = sd.lastErr
- continue
- case connectivity.Connecting:
- // Wait for the connection attempt to complete or the timer to fire
- // before attempting the next address.
- b.scheduleNextConnectionLocked()
- return
- default:
- b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
- return
-
- }
- }
-
- // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
- // first pass if possible.
- b.endFirstPassIfPossibleLocked(lastErr)
-}
-
-func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
- b.cancelConnectionTimer()
- if !b.addressList.hasNext() {
- return
- }
- curAddr := b.addressList.currentAddress()
- cancelled := false // Access to this is protected by the balancer's mutex.
- closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
- b.mu.Lock()
- defer b.mu.Unlock()
- // If the scheduled task is cancelled while acquiring the mutex, return.
- if cancelled {
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
- }
- if b.addressList.increment() {
- b.requestConnectionLocked()
- }
- })
- // Access to the cancellation callback held by the balancer is guarded by
- // the balancer's mutex, so it's safe to set the boolean from the callback.
- b.cancelConnectionTimer = sync.OnceFunc(func() {
- cancelled = true
- closeFn()
- })
-}
-
-func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- oldState := sd.rawConnectivityState
- sd.rawConnectivityState = newState.ConnectivityState
- // Previously relevant SubConns can still callback with state updates.
- // To prevent pickers from returning these obsolete SubConns, this logic
- // is included to check if the current list of active SubConns includes this
- // SubConn.
- if !b.isActiveSCData(sd) {
- return
- }
- if newState.ConnectivityState == connectivity.Shutdown {
- sd.effectiveState = connectivity.Shutdown
- return
- }
-
- // Record a connection attempt when exiting CONNECTING.
- if newState.ConnectivityState == connectivity.TransientFailure {
- sd.connectionFailedInFirstPass = true
- connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
- }
-
- if newState.ConnectivityState == connectivity.Ready {
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- b.shutdownRemainingLocked(sd)
- if !b.addressList.seekTo(sd.addr) {
- // This should not fail as we should have only one SubConn after
- // entering READY. The SubConn should be present in the addressList.
- b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
- return
- }
- if !b.healthCheckingEnabled {
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
- }
-
- sd.effectiveState = connectivity.Ready
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
- }
- // Send a CONNECTING update to take the SubConn out of sticky-TF if
- // required.
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
- b.updateSubConnHealthState(sd, scs)
- })
- return
- }
-
- // If the LB policy is READY, and it receives a subchannel state change,
- // it means that the READY subchannel has failed.
- // A SubConn can also transition from CONNECTING directly to IDLE when
- // a transport is successfully created, but the connection fails
- // before the SubConn can send the notification for READY. We treat
- // this as a successful connection and transition to IDLE.
- // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
- // part of the if condition below once the issue is fixed.
- if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
- // Once a transport fails, the balancer enters IDLE and starts from
- // the first address when the picker is used.
- b.shutdownRemainingLocked(sd)
- sd.effectiveState = newState.ConnectivityState
- // READY SubConn interspliced in between CONNECTING and IDLE, need to
- // account for that.
- if oldState == connectivity.Connecting {
- // A known issue (https://github.com/grpc/grpc-go/issues/7862)
- // causes a race that prevents the READY state change notification.
- // This works around it.
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- }
- disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
- b.addressList.reset()
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Idle,
- Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
- })
- return
- }
-
- if b.firstPass {
- switch newState.ConnectivityState {
- case connectivity.Connecting:
- // The effective state can be in either IDLE, CONNECTING or
- // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
- // TRANSIENT_FAILURE until it's READY. See A62.
- if sd.effectiveState != connectivity.TransientFailure {
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- }
- case connectivity.TransientFailure:
- sd.lastErr = newState.ConnectionError
- sd.effectiveState = connectivity.TransientFailure
- // Since we're re-using common SubConns while handling resolver
- // updates, we could receive an out of turn TRANSIENT_FAILURE from
- // a pass over the previous address list. Happy Eyeballs will also
- // cause out of order updates to arrive.
-
- if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
- b.cancelConnectionTimer()
- if b.addressList.increment() {
- b.requestConnectionLocked()
- return
- }
- }
-
- // End the first pass if we've seen a TRANSIENT_FAILURE from all
- // SubConns once.
- b.endFirstPassIfPossibleLocked(newState.ConnectionError)
- }
- return
- }
-
- // We have finished the first pass, keep re-connecting failing SubConns.
- switch newState.ConnectivityState {
- case connectivity.TransientFailure:
- b.numTF = (b.numTF + 1) % b.subConns.Len()
- sd.lastErr = newState.ConnectionError
- if b.numTF%b.subConns.Len() == 0 {
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: newState.ConnectionError},
- })
- }
- // We don't need to request re-resolution since the SubConn already
- // does that before reporting TRANSIENT_FAILURE.
- // TODO: #7534 - Move re-resolution requests from SubConn into
- // pick_first.
- case connectivity.Idle:
- sd.subConn.Connect()
- }
-}
-
-// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
-// addresses are tried and their SubConns have reported a failure.
-func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
- // An optimization to avoid iterating over the entire SubConn map.
- if b.addressList.isValid() {
- return
- }
- // Connect() has been called on all the SubConns. The first pass can be
- // ended if all the SubConns have reported a failure.
- for _, sd := range b.subConns.Values() {
- if !sd.connectionFailedInFirstPass {
- return
- }
- }
- b.firstPass = false
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: lastErr},
- })
- // Start re-connecting all the SubConns that are already in IDLE.
- for _, sd := range b.subConns.Values() {
- if sd.rawConnectivityState == connectivity.Idle {
- sd.subConn.Connect()
- }
- }
-}
-
-func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
- activeSD, found := b.subConns.Get(sd.addr)
- return found && activeSD == sd
-}
-
-func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- // Previously relevant SubConns can still callback with state updates.
- // To prevent pickers from returning these obsolete SubConns, this logic
- // is included to check if the current list of active SubConns includes
- // this SubConn.
- if !b.isActiveSCData(sd) {
- return
- }
- sd.effectiveState = state.ConnectivityState
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- case connectivity.TransientFailure:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
- })
- case connectivity.Connecting:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- default:
- b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
- }
-}
-
-// updateBalancerState stores the state reported to the channel and calls
-// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
-// updates to the channel.
-func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
- // In case of TransientFailures allow the picker to be updated to update
- // the connectivity error, in all other cases don't send duplicate state
- // updates.
- if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
- return
- }
- b.forceUpdateConcludedStateLocked(newState)
-}
-
-// forceUpdateConcludedStateLocked stores the state reported to the channel and
-// calls ClientConn.UpdateState().
-// A separate function is defined to force update the ClientConn state since the
-// channel doesn't correctly assume that LB policies start in CONNECTING and
-// relies on LB policy to send an initial CONNECTING update.
-func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
- b.state = newState.ConnectivityState
- b.cc.UpdateState(newState)
-}
-
-type picker struct {
- result balancer.PickResult
- err error
-}
-
-func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- return p.result, p.err
-}
-
-// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
-// CONNECTING when Pick is called.
-type idlePicker struct {
- exitIdle func()
-}
-
-func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.exitIdle()
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-}
-
-// addressList manages sequentially iterating over addresses present in a list
-// of endpoints. It provides a 1 dimensional view of the addresses present in
-// the endpoints.
-// This type is not safe for concurrent access.
-type addressList struct {
- addresses []resolver.Address
- idx int
-}
-
-func (al *addressList) isValid() bool {
- return al.idx < len(al.addresses)
-}
-
-func (al *addressList) size() int {
- return len(al.addresses)
-}
-
-// increment moves to the next index in the address list.
-// This method returns false if it went off the list, true otherwise.
-func (al *addressList) increment() bool {
- if !al.isValid() {
- return false
- }
- al.idx++
- return al.idx < len(al.addresses)
-}
-
-// currentAddress returns the current address pointed to in the addressList.
-// If the list is in an invalid state, it returns an empty address instead.
-func (al *addressList) currentAddress() resolver.Address {
- if !al.isValid() {
- return resolver.Address{}
- }
- return al.addresses[al.idx]
-}
-
-func (al *addressList) reset() {
- al.idx = 0
-}
-
-func (al *addressList) updateAddrs(addrs []resolver.Address) {
- al.addresses = addrs
- al.reset()
-}
-
-// seekTo returns false if the needle was not found and the current index was
-// left unchanged.
-func (al *addressList) seekTo(needle resolver.Address) bool {
- for ai, addr := range al.addresses {
- if !equalAddressIgnoringBalAttributes(&addr, &needle) {
- continue
- }
- al.idx = ai
- return true
- }
- return false
-}
-
-// hasNext returns whether incrementing the addressList will result in moving
-// past the end of the list. If the list has already moved past the end, it
-// returns false.
-func (al *addressList) hasNext() bool {
- if !al.isValid() {
- return false
- }
- return al.idx+1 < len(al.addresses)
-}
-
-// equalAddressIgnoringBalAttributes returns true is a and b are considered
-// equal. This is different from the Equal method on the resolver.Address type
-// which considers all fields to determine equality. Here, we only consider
-// fields that are meaningful to the SubConn.
-func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
- return a.Addr == b.Addr && a.ServerName == b.ServerName &&
- a.Attributes.Equal(b.Attributes)
-}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 22045bf3..22e6e326 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -26,7 +26,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/endpointsharding"
- "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/grpclog"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
@@ -47,7 +47,7 @@ func (bb builder) Name() string {
}
func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
- childBuilder := balancer.Get(pickfirstleaf.Name).Build
+ childBuilder := balancer.Get(pickfirst.Name).Build
bal := &rrBalancer{
cc: cc,
Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
@@ -67,6 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
// Enable the health listener in pickfirst children for client side health
// checks and outlier detection, if configured.
- ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+ ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState),
})
}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 948a21ef..2c760e62 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(
if acbw.ccb.cc.dopts.disableHealthCheck {
return noOpRegisterHealthListenerFn
}
+ cfg := acbw.ac.cc.healthCheckConfig()
+ if cfg == nil {
+ return noOpRegisterHealthListenerFn
+ }
regHealthLisFn := internal.RegisterClientHealthCheckListener
if regHealthLisFn == nil {
// The health package is not imported.
- return noOpRegisterHealthListenerFn
- }
- cfg := acbw.ac.cc.healthCheckConfig()
- if cfg == nil {
+ channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.")
return noOpRegisterHealthListenerFn
}
return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index b1364a03..42c61cf9 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.6
+// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 3f762285..b767d3e3 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -35,16 +35,19 @@ import (
"google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials"
+ expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
- "google.golang.org/grpc/internal/stats"
+ istats "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
+ "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
@@ -97,6 +100,41 @@ var (
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
)
+var (
+ disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.disconnections",
+ Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+ Unit: "{disconnection}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality", "grpc.disconnect_error"},
+ Default: false,
+ })
+ connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.connection_attempts_succeeded",
+ Description: "EXPERIMENTAL. Number of successful connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
+ Default: false,
+ })
+ connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.connection_attempts_failed",
+ Description: "EXPERIMENTAL. Number of failed connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.lb.locality"},
+ Default: false,
+ })
+ openConnectionsMetric = expstats.RegisterInt64UpDownCount(expstats.MetricDescriptor{
+ Name: "grpc.subchannel.open_connections",
+ Description: "EXPERIMENTAL. Number of open connections.",
+ Unit: "{connection}",
+ Labels: []string{"grpc.target"},
+ OptionalLabels: []string{"grpc.lb.backend_service", "grpc.security_level", "grpc.lb.locality"},
+ Default: false,
+ })
+)
+
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = math.MaxInt32
@@ -210,7 +248,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper()
- cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+ cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+ cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...)
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
@@ -260,9 +299,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}()
// This creates the name resolver, load balancer, etc.
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
- return nil, err
+ if err := cc.exitIdleMode(); err != nil {
+ return nil, fmt.Errorf("failed to exit idle mode: %w", err)
}
+ cc.idlenessMgr.UnsafeSetNotIdle()
// Return now for non-blocking dials.
if !cc.dopts.block {
@@ -330,7 +370,7 @@ func (cc *ClientConn) addTraceEvent(msg string) {
Severity: channelz.CtInfo,
}
}
- channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+ channelz.AddTraceEvent(logger, cc.channelz, 1, ted)
}
type idler ClientConn
@@ -339,14 +379,17 @@ func (i *idler) EnterIdleMode() {
(*ClientConn)(i).enterIdleMode()
}
-func (i *idler) ExitIdleMode() error {
- return (*ClientConn)(i).exitIdleMode()
+func (i *idler) ExitIdleMode() {
+ // Ignore the error returned from this method, because from the perspective
+ // of the caller (idleness manager), the channel would have always moved out
+ // of IDLE by the time this method returns.
+ (*ClientConn)(i).exitIdleMode()
}
// exitIdleMode moves the channel out of idle mode by recreating the name
// resolver and load balancer. This should never be called directly; use
// cc.idlenessMgr.ExitIdleMode instead.
-func (cc *ClientConn) exitIdleMode() (err error) {
+func (cc *ClientConn) exitIdleMode() error {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
@@ -354,11 +397,23 @@ func (cc *ClientConn) exitIdleMode() (err error) {
}
cc.mu.Unlock()
+ // Set state to CONNECTING before building the name resolver
+ // so the channel does not remain in IDLE.
+ cc.csMgr.updateState(connectivity.Connecting)
+
// This needs to be called without cc.mu because this builds a new resolver
// which might update state or report error inline, which would then need to
// acquire cc.mu.
if err := cc.resolverWrapper.start(); err != nil {
- return err
+ // If resolver creation fails, treat it like an error reported by the
+ // resolver before any valid updates. Set channel's state to
+ // TransientFailure, and set an erroring picker with the resolver build
+ // error, which will be returned as part of any subsequent RPCs.
+ logger.Warningf("Failed to start resolver: %v", err)
+ cc.csMgr.updateState(connectivity.TransientFailure)
+ cc.mu.Lock()
+ cc.updateResolverStateAndUnlock(resolver.State{}, err)
+ return fmt.Errorf("failed to start resolver: %w", err)
}
cc.addTraceEvent("exiting idle mode")
@@ -456,7 +511,7 @@ func (cc *ClientConn) validateTransportCredentials() error {
func (cc *ClientConn) channelzRegistration(target string) {
parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
cc.channelz = channelz.RegisterChannel(parentChannel, target)
- cc.addTraceEvent("created")
+ cc.addTraceEvent(fmt.Sprintf("created for target %q", target))
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -621,7 +676,8 @@ type ClientConn struct {
channelz *channelz.Channel // Channelz object.
resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
idlenessMgr *idle.Manager
- metricsRecorderList *stats.MetricsRecorderList
+ metricsRecorderList *istats.MetricsRecorderList
+ statsHandler stats.Handler
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@@ -678,10 +734,8 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
- cc.addTraceEvent(err.Error())
- return
- }
+ cc.idlenessMgr.ExitIdleMode()
+
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
// LB policy so that connections can be created.
cc.mu.Lock()
@@ -732,8 +786,8 @@ func init() {
internal.EnterIdleModeForTesting = func(cc *ClientConn) {
cc.idlenessMgr.EnterIdleModeForTesting()
}
- internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
- return cc.idlenessMgr.ExitIdleMode()
+ internal.ExitIdleModeForTesting = func(cc *ClientConn) {
+ cc.idlenessMgr.ExitIdleMode()
}
}
@@ -858,6 +912,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
}
+ ac.updateTelemetryLabelsLocked()
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
// we connect to different addresses.
@@ -974,7 +1029,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
}
ac.addrs = addrs
-
+ ac.updateTelemetryLabelsLocked()
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
@@ -1213,6 +1268,9 @@ type addrConn struct {
resetBackoff chan struct{}
channelz *channelz.SubChannel
+
+ localityLabel string
+ backendServiceLabel string
}
// Note: this requires a lock on ac.mu.
@@ -1220,6 +1278,18 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
+
+ // If we are transitioning out of Ready, it means there is a disconnection.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+ // part of the if condition below once the issue is fixed.
+ if ac.state == connectivity.Ready || (ac.state == connectivity.Connecting && s == connectivity.Idle) {
+ disconnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel, "unknown")
+ openConnectionsMetric.Record(ac.cc.metricsRecorderList, -1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
+ }
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@@ -1277,6 +1347,15 @@ func (ac *addrConn) resetTransportAndUnlock() {
ac.mu.Unlock()
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+ if !errors.Is(err, context.Canceled) {
+ connectionAttemptsFailedMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
+ } else {
+ if logger.V(2) {
+ // Cancelled connection attempts are only logged for now; this may be
+ // replaced by a dedicated metric later.
+ logger.Infof("Context cancellation detected; not recording this as a failed connection attempt.")
+ }
+ }
// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
// to ensure one resolution request per pass instead of per subconn failure.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
@@ -1316,10 +1395,50 @@ func (ac *addrConn) resetTransportAndUnlock() {
}
// Success; reset backoff.
ac.mu.Lock()
+ connectionAttemptsSucceededMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.localityLabel)
+ openConnectionsMetric.Record(ac.cc.metricsRecorderList, 1, ac.cc.target, ac.backendServiceLabel, ac.securityLevelLocked(), ac.localityLabel)
ac.backoffIdx = 0
ac.mu.Unlock()
}
+// updateTelemetryLabelsLocked calculates and caches the telemetry labels based on the
+// first address in addrConn.
+func (ac *addrConn) updateTelemetryLabelsLocked() {
+ labelsFunc, ok := internal.AddressToTelemetryLabels.(func(resolver.Address) map[string]string)
+ if !ok || len(ac.addrs) == 0 {
+ // Reset defaults
+ ac.localityLabel = ""
+ ac.backendServiceLabel = ""
+ return
+ }
+ labels := labelsFunc(ac.addrs[0])
+ ac.localityLabel = labels["grpc.lb.locality"]
+ ac.backendServiceLabel = labels["grpc.lb.backend_service"]
+}
+
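+// securityLevelKey is the attributes key under which the connection's
+// security level is cached on the current address.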
+type securityLevelKey struct{}
+
+func (ac *addrConn) securityLevelLocked() string {
+ var secLevel string
+ // During disconnection, ac.transport is nil. Fall back to the security level
+ // stored in the current address during connection.
+ if ac.transport == nil {
+ secLevel, _ = ac.curAddr.Attributes.Value(securityLevelKey{}).(string)
+ return secLevel
+ }
+ authInfo := ac.transport.Peer().AuthInfo
+ if ci, ok := authInfo.(interface {
+ GetCommonAuthInfo() credentials.CommonAuthInfo
+ }); ok {
+ secLevel = ci.GetCommonAuthInfo().SecurityLevel.String()
+ // Store the security level in the current address' attributes so
+ // that it remains available for disconnection metrics after the
+ // transport is closed.
+ ac.curAddr.Attributes = ac.curAddr.Attributes.WithValue(securityLevelKey{}, secLevel)
+ }
+ return secLevel
+}
+
// tryAllAddrs tries to create a connection to the addresses, and stop when at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index c8e337cd..06f6c6c7 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -44,8 +44,7 @@ type PerRPCCredentials interface {
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
- // to this call. TODO(zhaoq): Define the set of the qualified keys instead
- // of leaving it as an arbitrary string.
+ // to this call.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 11d0ae14..dadd21e4 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -27,8 +27,10 @@ package encoding
import (
"io"
+ "slices"
"strings"
+ "google.golang.org/grpc/encoding/internal"
"google.golang.org/grpc/internal/grpcutil"
)
@@ -36,6 +38,24 @@ import (
// It is intended for grpc internal use only.
const Identity = "identity"
+func init() {
+ internal.RegisterCompressorForTesting = func(c Compressor) func() {
+ name := c.Name()
+ curCompressor, found := registeredCompressor[name]
+ RegisterCompressor(c)
+ return func() {
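+ // Restore the previously registered compressor, or remove the new one
+ // entirely if nothing was registered under that name before.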
+ if found {
+ registeredCompressor[name] = curCompressor
+ return
+ }
+ delete(registeredCompressor, name)
+ grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool {
+ return s == name
+ })
+ }
+ }
+}
+
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
//
diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go
new file mode 100644
index 00000000..ee9acb43
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright 2025 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the encoding package.
+package internal
+
+// RegisterCompressorForTesting registers a compressor in the global compressor
+// registry. It returns a cleanup function that should be called at the end
+// of the test to unregister the compressor.
+//
+// This prevents compressors registered in one test from appearing in the
+// encoding headers of subsequent tests.
+var RegisterCompressorForTesting any // func(c Compressor) func()
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index ceec319d..1ab874c7 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
+ // Important: if we remove this Size call then we cannot use
+ // UseCachedSize in MarshalOptions below.
size := proto.Size(vv)
+
+ // MarshalOptions with UseCachedSize allows reusing the result from the
+ // previous Size call. This is safe here because:
+ //
+ // 1. We just computed the size.
+ // 2. We assume the message is not being mutated concurrently.
+ //
+ // Important: If the proto.Size call above is removed, using UseCachedSize
+ // becomes unsafe and may lead to incorrect marshaling.
+ //
+ // For more details, see the doc of UseCachedSize:
+ // https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions
+ marshalOptions := proto.MarshalOptions{UseCachedSize: true}
+
if mem.IsBelowBufferPoolingThreshold(size) {
- buf, err := proto.Marshal(vv)
+ buf, err := marshalOptions.Marshal(vv)
if err != nil {
return nil, err
}
@@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
} else {
pool := mem.DefaultBufferPool()
buf := pool.Get(size)
- if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil {
pool.Put(buf)
return nil, err
}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
index ad75313a..472813f5 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -75,6 +75,8 @@ const (
MetricTypeIntHisto
MetricTypeFloatHisto
MetricTypeIntGauge
+ MetricTypeIntUpDownCount
+ MetricTypeIntAsyncGauge
)
// Int64CountHandle is a typed handle for a int count metric. This handle
@@ -93,6 +95,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels .
recorder.RecordInt64Count(h, incr, labels...)
}
+// Int64UpDownCountHandle is a typed handle for an int up-down counter metric.
+// This handle is passed at the recording point in order to know which metric
+// to record on.
+type Int64UpDownCountHandle MetricDescriptor
+
+// Descriptor returns the int64 up-down counter handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 up-down counter value on the metrics recorder provided.
+// The value 'v' can be positive to increment or negative to decrement.
+func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) {
+ recorder.RecordInt64UpDownCount(h, v, labels...)
+}
+
// Float64CountHandle is a typed handle for a float count metric. This handle is
// passed at the recording point in order to know which metric to record on.
type Float64CountHandle MetricDescriptor
@@ -154,6 +173,30 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels .
recorder.RecordInt64Gauge(h, incr, labels...)
}
+// AsyncMetric is a marker interface for asynchronous metric types.
+type AsyncMetric interface {
+ isAsync()
+ Descriptor() *MetricDescriptor
+}
+
+// Int64AsyncGaugeHandle is a typed handle for an int async gauge metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Int64AsyncGaugeHandle MetricDescriptor
+
+// isAsync implements the AsyncMetric interface.
+func (h *Int64AsyncGaugeHandle) isAsync() {}
+
+// Descriptor returns the int64 async gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64AsyncGaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 async gauge value on the async metrics recorder provided.
+func (h *Int64AsyncGaugeHandle) Record(recorder AsyncMetricsRecorder, value int64, labels ...string) {
+ recorder.RecordInt64AsyncGauge(h, value, labels...)
+}
+
// registeredMetrics are the registered metric descriptor names.
var registeredMetrics = make(map[string]bool)
@@ -249,6 +292,35 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
return (*Int64GaugeHandle)(descPtr)
}
+// RegisterInt64UpDownCount registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ // Set the specific metric type for the up-down counter
+ descriptor.Type = MetricTypeIntUpDownCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64UpDownCountHandle)(descPtr)
+}
+
+// RegisterInt64AsyncGauge registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64AsyncGauge(descriptor MetricDescriptor) *Int64AsyncGaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntAsyncGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64AsyncGaugeHandle)(descPtr)
+}
+
// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
// registry. Returns a cleanup function that sets the metrics registry to its
// original state.
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
index ee142360..d7d404cb 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -38,6 +38,16 @@ type MetricsRecorder interface {
// RecordInt64Gauge records the measurement alongside labels on the int
// gauge associated with the provided handle.
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+ // RecordInt64UpDownCount records the measurement alongside labels on the
+ // int up-down counter associated with the provided handle.
+ RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
+}
+
+// AsyncMetricsRecorder records on asynchronous metrics derived from metric registry.
+type AsyncMetricsRecorder interface {
+ // RecordInt64AsyncGauge records the measurement alongside labels on the
+ // int async gauge associated with the provided handle, asynchronously.
+ RecordInt64AsyncGauge(handle *Int64AsyncGaugeHandle, incr int64, labels ...string)
}
// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 22d263fb..8f7d9f6b 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.6
+// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index f2c01f29..e99cd5c8 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.5.1
+// - protoc-gen-go-grpc v1.6.0
// - protoc v5.27.1
// source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index ba25b898..f38de74a 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -67,6 +67,10 @@ type Balancer struct {
// balancerCurrent before the UpdateSubConnState is called on the
// balancerCurrent.
currentMu sync.Mutex
+
+ // activeGoroutines tracks all the goroutines that this balancer has started
+ // and that should be waited on when the balancer closes.
+ activeGoroutines sync.WaitGroup
}
// swap swaps out the current lb with the pending lb and updates the ClientConn.
@@ -76,7 +80,9 @@ func (gsb *Balancer) swap() {
cur := gsb.balancerCurrent
gsb.balancerCurrent = gsb.balancerPending
gsb.balancerPending = nil
+ gsb.activeGoroutines.Add(1)
go func() {
+ defer gsb.activeGoroutines.Done()
gsb.currentMu.Lock()
defer gsb.currentMu.Unlock()
cur.Close()
@@ -274,6 +280,7 @@ func (gsb *Balancer) Close() {
currentBalancerToClose.Close()
pendingBalancerToClose.Close()
+ gsb.activeGoroutines.Wait()
}
// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
@@ -324,7 +331,12 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) {
defer bw.gsb.mu.Unlock()
bw.lastState = state
+ // If Close() acquires the mutex before UpdateState(), the balancer
+ // will already have been removed from the current or pending state when
+ // reaching this point.
if !bw.gsb.balancerCurrentOrPending(bw) {
+ // Returning here ensures that (*Balancer).swap() is not invoked after
+ // (*Balancer).Close() and therefore prevents "use after close".
return
}
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 11f91668..467392b8 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -83,6 +83,7 @@ func (b *Unbounded) Load() {
default:
}
} else if b.closing && !b.closed {
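+ // Mark the channel as closed before closing it so it is closed at most once.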
+ b.closed = true
close(b.c)
}
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 2bffe477..3b7ba596 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -194,7 +194,7 @@ func (r RefChannelType) String() string {
// If channelz is not turned ON, this will simply log the event descriptions.
func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
// Log only the trace description associated with the bottom most entity.
- d := fmt.Sprintf("[%s]%s", e, desc.Desc)
+ d := fmt.Sprintf("[%s] %s", e, desc.Desc)
switch desc.Severity {
case CtUnknown, CtInfo:
l.InfoDepth(depth+1, d)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 7e060f5e..6414ee4b 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -52,12 +52,6 @@ var (
// or "false".
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
- // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
- // instead of the exiting pickfirst implementation. This can be disabled by
- // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
- // to "false".
- NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
-
// XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
// key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
// setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
@@ -75,6 +69,19 @@ var (
// ALTSHandshakerKeepaliveParams is set if we should add the
// KeepaliveParams when dial the ALTS handshaker service.
ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
+
+ // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443
+ // to a target address that lacks one. This flag only has an effect when all of
+ // the following conditions are met:
+ // - A connect proxy is being used.
+ // - Target resolution is disabled.
+ // - The DNS resolver is being used.
+ EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true)
+
+ // XDSAuthorityRewrite indicates whether xDS authority rewriting is enabled.
+ // This feature is defined in gRFC A81 and is enabled by setting the
+ // environment variable GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE to "true".
+ XDSAuthorityRewrite = boolFromEnv("GRPC_EXPERIMENTAL_XDS_AUTHORITY_REWRITE", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index e8755155..7685d08b 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -68,4 +68,15 @@ var (
// trust. For more details, see:
// https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md
XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false)
+
+ // XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata
+ // configuring use of an HTTP CONNECT proxy via xDS from cluster resources.
+ // For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
+ XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
+
+ // XDSBootstrapCallCredsEnabled controls if call credentials can be used in
+ // xDS bootstrap configuration via the `call_creds` field. For more details,
+ // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md
+ XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false)
)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7617be21..c90cc51b 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -25,4 +25,8 @@ var (
// BufferPool is implemented by the grpc package and returns a server
// option to configure a shared buffer pool for a grpc.Server.
BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+
+ // AcceptCompressors is implemented by the grpc package and returns
+ // a call option that restricts the grpc-accept-encoding header for a call.
+ AcceptCompressors any // func(...string) grpc.CallOption
)
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 8e8e8612..9b6d8a1f 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure
func (cs *CallbackSerializer) run(ctx context.Context) {
defer close(cs.done)
- // TODO: when Go 1.21 is the oldest supported version, this loop and Close
- // can be replaced with:
- //
- // context.AfterFunc(ctx, cs.callbacks.Close)
- for ctx.Err() == nil {
- select {
- case <-ctx.Done():
- // Do nothing here. Next iteration of the for loop will not happen,
- // since ctx.Err() would be non-nil.
- case cb := <-cs.callbacks.Get():
- cs.callbacks.Load()
- cb.(func(context.Context))(ctx)
- }
- }
-
- // Close the buffer to prevent new callbacks from being added.
- cs.callbacks.Close()
+ // Close the buffer when the context is canceled
+ // to prevent new callbacks from being added.
+ context.AfterFunc(ctx, cs.callbacks.Close)
- // Run all pending callbacks.
+ // Run all callbacks.
for cb := range cs.callbacks.Get() {
cs.callbacks.Load()
cb.(func(context.Context))(ctx)
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index 2c13ee9d..d3cd24f8 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -21,7 +21,6 @@
package idle
import (
- "fmt"
"math"
"sync"
"sync/atomic"
@@ -33,15 +32,15 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
return time.AfterFunc(d, f)
}
-// Enforcer is the functionality provided by grpc.ClientConn to enter
-// and exit from idle mode.
-type Enforcer interface {
- ExitIdleMode() error
+// ClientConn is the functionality provided by grpc.ClientConn to enter and exit
+// from idle mode.
+type ClientConn interface {
+ ExitIdleMode()
EnterIdleMode()
}
-// Manager implements idleness detection and calls the configured Enforcer to
-// enter/exit idle mode when appropriate. Must be created by NewManager.
+// Manager implements idleness detection and calls the ClientConn to enter/exit
+// idle mode when appropriate. Must be created by NewManager.
type Manager struct {
// State accessed atomically.
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
@@ -51,8 +50,8 @@ type Manager struct {
// Can be accessed without atomics or mutex since these are set at creation
// time and read-only after that.
- enforcer Enforcer // Functionality provided by grpc.ClientConn.
- timeout time.Duration
+ cc ClientConn // Functionality provided by grpc.ClientConn.
+ timeout time.Duration
// idleMu is used to guarantee mutual exclusion in two scenarios:
// - Opposing intentions:
@@ -72,9 +71,9 @@ type Manager struct {
// NewManager creates a new idleness manager implementation for the
// given idle timeout. It begins in idle mode.
-func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+func NewManager(cc ClientConn, timeout time.Duration) *Manager {
return &Manager{
- enforcer: enforcer,
+ cc: cc,
timeout: timeout,
actuallyIdle: true,
activeCallsCount: -math.MaxInt32,
@@ -127,7 +126,7 @@ func (m *Manager) handleIdleTimeout() {
// Now that we've checked that there has been no activity, attempt to enter
// idle mode, which is very likely to succeed.
- if m.tryEnterIdleMode() {
+ if m.tryEnterIdleMode(true) {
// Successfully entered idle mode. No timer needed until we exit idle.
return
}
@@ -142,10 +141,13 @@ func (m *Manager) handleIdleTimeout() {
// that, it performs a last minute check to ensure that no new RPC has come in,
// making the channel active.
//
+// checkActivity controls whether a check for RPC activity since the last time
+// the idle timeout fired is performed before entering idle mode.
+//
// Return value indicates whether or not the channel moved to idle mode.
//
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (m *Manager) tryEnterIdleMode() bool {
+func (m *Manager) tryEnterIdleMode(checkActivity bool) bool {
// Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
// that the channel is either in idle mode or is trying to get there.
if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
@@ -166,7 +168,7 @@ func (m *Manager) tryEnterIdleMode() bool {
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
- if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+ if checkActivity && atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
// A very short RPC could have come in (and also finished) after we
// checked for calls count and activity in handleIdleTimeout(), but
// before the CAS operation. So, we need to check for activity again.
@@ -177,44 +179,37 @@ func (m *Manager) tryEnterIdleMode() bool {
// No new RPCs have come in since we set the active calls count value to
// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
// unconditionally now.
- m.enforcer.EnterIdleMode()
+ m.cc.EnterIdleMode()
m.actuallyIdle = true
return true
}
// EnterIdleModeForTesting instructs the channel to enter idle mode.
func (m *Manager) EnterIdleModeForTesting() {
- m.tryEnterIdleMode()
+ m.tryEnterIdleMode(false)
}
// OnCallBegin is invoked at the start of every RPC.
-func (m *Manager) OnCallBegin() error {
+func (m *Manager) OnCallBegin() {
if m.isClosed() {
- return nil
+ return
}
if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
// Channel is not idle now. Set the activity bit and allow the call.
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
- return nil
+ return
}
// Channel is either in idle mode or is in the process of moving to idle
// mode. Attempt to exit idle mode to allow this RPC.
- if err := m.ExitIdleMode(); err != nil {
- // Undo the increment to calls count, and return an error causing the
- // RPC to fail.
- atomic.AddInt32(&m.activeCallsCount, -1)
- return err
- }
-
+ m.ExitIdleMode()
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
- return nil
}
-// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// ExitIdleMode instructs m to call the ClientConn's ExitIdleMode and update its
// internal state.
-func (m *Manager) ExitIdleMode() error {
+func (m *Manager) ExitIdleMode() {
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
m.idleMu.Lock()
defer m.idleMu.Unlock()
@@ -231,12 +226,10 @@ func (m *Manager) ExitIdleMode() error {
// m.ExitIdleMode.
//
// In any case, there is nothing to do here.
- return nil
+ return
}
- if err := m.enforcer.ExitIdleMode(); err != nil {
- return fmt.Errorf("failed to exit idle mode: %w", err)
- }
+ m.cc.ExitIdleMode()
// Undo the idle entry process. This also respects any new RPC attempts.
atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
@@ -244,7 +237,23 @@ func (m *Manager) ExitIdleMode() error {
// Start a new timer to fire after the configured idle timeout.
m.resetIdleTimerLocked(m.timeout)
- return nil
+}
+
+// UnsafeSetNotIdle instructs the Manager to update its internal state to
+// reflect the reality that the channel is no longer in IDLE mode.
+//
+// N.B. This method is intended only for internal use by the gRPC client
+// when it exits IDLE mode **manually** from `Dial`. The callsite must ensure:
+// - The channel was **actually in IDLE mode** immediately prior to the call.
+// - There is **no concurrent activity** that could cause the channel to exit
+// IDLE mode *naturally* at the same time.
+func (m *Manager) UnsafeSetNotIdle() {
+ m.idleMu.Lock()
+ defer m.idleMu.Unlock()
+
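+ // Mirror ExitIdleMode: undo the idle-entry bookkeeping and restart the
+ // idle timer.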
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+ m.actuallyIdle = false
+ m.resetIdleTimerLocked(m.timeout)
}
// OnCallEnd is invoked at the end of every RPC.
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 2699223a..27bef83d 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -244,6 +244,10 @@ var (
// When set, the function will be called before the stream enters
// the blocking state.
NewStreamWaitingForResolver = func() {}
+
+ // AddressToTelemetryLabels is an xDS-provided function to extract telemetry
+ // labels from a resolver.Address. Callers must assert its type before calling.
+ AddressToTelemetryLabels any // func(addr resolver.Address) map[string]string
)
// HealthChecker defines the signature of the client-side LB channel health
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
index 20b8fb09..5bfa67b7 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -22,11 +22,13 @@ package delegatingresolver
import (
"fmt"
+ "net"
"net/http"
"net/url"
"sync"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/proxyattributes"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/internal/transport/networktype"
@@ -40,6 +42,8 @@ var (
HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
)
+const defaultPort = "443"
+
// delegatingResolver manages both target URI and proxy address resolution by
// delegating these tasks to separate child resolvers. Essentially, it acts as
// an intermediary between the gRPC ClientConn and the child resolvers.
@@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
targetResolver: nopResolver{},
}
+ addr := target.Endpoint()
var err error
- r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+ if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget {
+ addr, err = parseTarget(addr)
+ if err != nil {
+ return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err)
+ }
+ }
+
+ r.proxyURL, err = proxyURLForTarget(addr)
if err != nil {
- return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+ return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err)
}
// proxy is not configured or proxy address excluded using `NO_PROXY` env
@@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
// bypass the target resolver and store the unresolved target address.
if target.URL.Scheme == "dns" && !targetResolutionEnabled {
r.targetResolverState = &resolver.State{
- Addresses: []resolver.Address{{Addr: target.Endpoint()}},
- Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+ Addresses: []resolver.Address{{Addr: addr}},
+ Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}},
}
r.updateTargetResolverState(*r.targetResolverState)
return r, nil
@@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool {
return false
}
+// parseTarget takes a target string and ensures it is a valid "host:port" target.
+//
+// It does the following:
+// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"),
+// it is returned as is.
+// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost",
+// returning "localhost:80".
+// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort
+// is added.
+//
+// An error is returned for empty targets or targets with a trailing colon
+// but no port (e.g., "host:").
+func parseTarget(target string) (string, error) {
+ if target == "" {
+ return "", fmt.Errorf("missing address")
+ }
+
+ host, port, err := net.SplitHostPort(target)
+ if err != nil {
+ // If SplitHostPort fails, it's likely because the port is missing.
+ // We append the default port and return the result.
+ return net.JoinHostPort(target, defaultPort), nil
+ }
+
+ // If SplitHostPort succeeds, we check for edge cases.
+ if port == "" {
+ // A success with an empty port means the target had a trailing colon,
+ // e.g., "host:", which is an error.
+ return "", fmt.Errorf("missing port after port-separator colon")
+ }
+ if host == "" {
+ // A success with an empty host means the target was like ":80".
+ // We default the host to "localhost".
+ host = "localhost"
+ }
+ return net.JoinHostPort(host, port), nil
+}
+
func skipProxy(address resolver.Address) bool {
// Avoid proxy when network is not tcp.
networkType, ok := networktype.Get(address)
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
index 79044657..d5f7e4d6 100644
--- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle,
}
}
+// RecordInt64UpDownCount records the measurement alongside labels on the int
+// up-down counter associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64UpDownCount(handle, incr, labels...)
+ }
+}
+
// RecordFloat64Count records the measurement alongside labels on the float
// count associated with the provided handle.
func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go
new file mode 100644
index 00000000..49019b80
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/stats.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2025 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "context"
+
+ "google.golang.org/grpc/stats"
+)
+
+type combinedHandler struct {
+ handlers []stats.Handler
+}
+
+// NewCombinedHandler combines multiple stats.Handlers into a single handler.
+//
+// It returns nil if no handlers are provided. If only one handler is
+// provided, it is returned directly without wrapping.
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler {
+ switch len(handlers) {
+ case 0:
+ return nil
+ case 1:
+ return handlers[0]
+ default:
+ return &combinedHandler{handlers: handlers}
+ }
+}
+
+func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ for _, h := range ch.handlers {
+ ctx = h.TagRPC(ctx, info)
+ }
+ return ctx
+}
+
+func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) {
+ for _, h := range ch.handlers {
+ h.HandleRPC(ctx, stats)
+ }
+}
+
+func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ for _, h := range ch.handlers {
+ ctx = h.TagConn(ctx, info)
+ }
+ return ctx
+}
+
+func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) {
+ for _, h := range ch.handlers {
+ h.HandleConn(ctx, stats)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
index ccc0e017..98045251 100644
--- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -29,25 +29,27 @@ import (
// ClientStream implements streaming functionality for a gRPC client.
type ClientStream struct {
- *Stream // Embed for common stream functionality.
+ Stream // Embed for common stream functionality.
ct *http2Client
done chan struct{} // closed at the end of stream to unblock writers.
doneFunc func() // invoked at the end of stream.
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ header metadata.MD // the received header metadata
+
+ status *status.Status // the status error received from the server
+
+ // Non-pointer fields are at the end to optimize GC allocations.
+
// headerValid indicates whether a valid header was received. Only
// meaningful after headerChan is closed (always call waitOnHeader() before
// reading its value).
- headerValid bool
- header metadata.MD // the received header metadata
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
- unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
-
- status *status.Status // the status error received from the server
+ headerValid bool
+ noHeaders bool // set if the client never received headers (set only after the stream is done).
+ headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+ unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
}
// Read reads an n byte message from the input stream.
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool {
func (s *ClientStream) Status() *status.Status {
return s.status
}
+
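+// requestRead and updateWindow implement the stream's flow-control callbacks
+// by delegating to the owning http2Client.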
+func (s *ClientStream) requestRead(n int) {
+ s.ct.adjustWindow(s, uint32(n))
+}
+
+func (s *ClientStream) updateWindow(n int) {
+ s.ct.updateWindow(s, uint32(n))
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index a2831e5d..2dcd1e63 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -496,6 +496,16 @@ const (
serverSide
)
+// maxWriteBufSize is the maximum length (number of elements) the cached
+// writeBuf can grow to. The length depends on the number of buffers
+// contained within the BufferSlice produced by the codec, which is
+// generally small.
+//
+// If a writeBuf larger than this limit is required, it will be allocated
+// and freed after use, rather than being cached. This avoids holding
+// on to large amounts of memory.
+const maxWriteBufSize = 64
+
// Loopy receives frames from the control buffer.
// Each frame is handled individually; most of the work done by loopy goes
// into handling data frames. Loopy maintains a queue of active streams, and each
@@ -530,6 +540,8 @@ type loopyWriter struct {
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
+
+ writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek.
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
@@ -665,11 +677,10 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- reader: mem.BufferSlice{}.Reader(),
+ id: h.streamID,
+ state: empty,
+ itl: &itemList{},
+ wq: h.wq,
}
l.estdStreams[h.streamID] = str
}
@@ -701,11 +712,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
}
// Case 2: Client wants to originate stream.
str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- reader: mem.BufferSlice{}.Reader(),
+ id: h.streamID,
+ state: empty,
+ itl: &itemList{},
+ wq: h.wq,
}
return l.originateStream(str, h)
}
@@ -948,11 +958,11 @@ func (l *loopyWriter) processData() (bool, error) {
if str == nil {
return true, nil
}
- reader := str.reader
+ reader := &str.reader
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
if !dataItem.processing {
dataItem.processing = true
- str.reader.Reset(dataItem.data)
+ reader.Reset(dataItem.data)
dataItem.data.Free()
}
// A data item is represented by a dataFrame, since it later translates into
@@ -964,11 +974,11 @@ func (l *loopyWriter) processData() (bool, error) {
if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
- if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+ if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
- _ = reader.Close()
+ reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -1001,25 +1011,20 @@ func (l *loopyWriter) processData() (bool, error) {
remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
size := hSize + dSize
- var buf *[]byte
-
- if hSize != 0 && dSize == 0 {
- buf = &dataItem.h
- } else {
- // Note: this is only necessary because the http2.Framer does not support
- // partially writing a frame, so the sequence must be materialized into a buffer.
- // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
- pool := l.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
+ l.writeBuf = l.writeBuf[:0]
+ if hSize > 0 {
+ l.writeBuf = append(l.writeBuf, dataItem.h[:hSize])
+ }
+ if dSize > 0 {
+ var err error
+ l.writeBuf, err = reader.Peek(dSize, l.writeBuf)
+ if err != nil {
+ // This must never happen since the reader must have at least dSize
+ // bytes.
+ // Log an error to fail tests.
+ l.logger.Errorf("unexpected error while reading Data frame payload: %v", err)
+ return false, err
}
- buf = pool.Get(size)
- defer pool.Put(buf)
-
- copy((*buf)[:hSize], dataItem.h)
- _, _ = reader.Read((*buf)[hSize:])
}
// Now that outgoing flow controls are checked we can replenish str's write quota
@@ -1032,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) {
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
+ err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf)
+ reader.Discard(dSize)
+ if cap(l.writeBuf) > maxWriteBufSize {
+ l.writeBuf = nil
+ } else {
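+ // Zero the elements so the cached slice does not keep the underlying
+ // buffers reachable until the next write.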
+ clear(l.writeBuf)
+ }
+ if err != nil {
return false, err
}
str.bytesOutStanding += size
@@ -1040,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem.h = dataItem.h[hSize:]
if remainingBytes == 0 { // All the data from that message was written out.
- _ = reader.Close()
+ reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index dfc0f224..7cfbc963 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -28,7 +28,7 @@ import (
// writeQuota is a soft limit on the amount of data a stream can
// schedule before some of it is written out.
type writeQuota struct {
- quota int32
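+ // noCopy guards against the writeQuota being copied by value now that it
+ // is initialized in place and embedded in the stream.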
+ _ noCopy
// get waits on read from when quota goes less than or equal to zero.
// replenish writes on it when quota goes positive again.
ch chan struct{}
@@ -38,16 +38,17 @@ type writeQuota struct {
// It is implemented as a field so that it can be updated
// by tests.
replenish func(n int)
+ quota int32
}
-func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
- w := &writeQuota{
- quota: sz,
- ch: make(chan struct{}, 1),
- done: done,
- }
+// init allows a writeQuota to be initialized in-place, which is useful for
+// resetting a buffer or for avoiding a heap allocation when the buffer is
+// embedded in another struct.
+func (w *writeQuota) init(sz int32, done <-chan struct{}) {
+ w.quota = sz
+ w.ch = make(chan struct{}, 1)
+ w.done = done
w.replenish = w.realReplenish
- return w
}
func (w *writeQuota) get(sz int32) error {
@@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error {
func (w *writeQuota) realReplenish(n int) {
sz := int32(n)
- a := atomic.AddInt32(&w.quota, sz)
- b := a - sz
- if b <= 0 && a > 0 {
+ newQuota := atomic.AddInt32(&w.quota, sz)
+ previousQuota := newQuota - sz
+ if previousQuota <= 0 && newQuota > 0 {
select {
case w.ch <- struct{}{}:
default:
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 3dea2357..7ab3422b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -170,7 +170,7 @@ type serverHandlerTransport struct {
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
- stats []stats.Handler
+ stats stats.Handler
logger *grpclog.PrefixLogger
bufferPool mem.BufferPool
@@ -274,14 +274,14 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status
}
})
- if err == nil { // transport has not been closed
+ if err == nil && ht.stats != nil { // transport has not been closed
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
- for _, sh := range ht.stats {
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
- Trailer: s.trailer.Copy(),
- })
- }
+ s.hdrMu.Lock()
+ ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
+ s.hdrMu.Unlock()
}
ht.Close(errors.New("finished writing status"))
return err
@@ -372,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e
ht.rw.(http.Flusher).Flush()
})
- if err == nil {
- for _, sh := range ht.stats {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutHeader{
- Header: md.Copy(),
- Compression: s.sendCompress,
- })
- }
+ if err == nil && ht.stats != nil {
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+ Header: md.Copy(),
+ Compression: s.sendCompress,
+ })
}
return err
}
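+// adjustWindow and updateWindow are no-ops for the handler-based transport;
+// HTTP/2 flow control is handled by the underlying net/http server.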
+func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) {
+}
+
+func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) {
+}
+
func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
var cancel context.CancelFunc
@@ -409,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
s := &ServerStream{
- Stream: &Stream{
+ Stream: Stream{
id: 0, // irrelevant
ctx: ctx,
- requestRead: func(int) {},
- buf: newRecvBuffer(),
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
contentSubtype: ht.contentSubtype,
@@ -422,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
st: ht,
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
- s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
- windowHandler: func(int) {},
+ s.Stream.buf.init()
+ s.readRequester = s
+ s.trReader = transportReader{
+ reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf},
+ windowHandler: s,
}
// readerDone is closed when the Body.Read-ing goroutine exits.
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 5467fe97..38ca031a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -44,6 +44,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
"google.golang.org/grpc/internal/proxyattributes"
+ istats "google.golang.org/grpc/internal/stats"
istatus "google.golang.org/grpc/internal/status"
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
@@ -105,7 +106,7 @@ type http2Client struct {
kp keepalive.ClientParameters
keepaliveEnabled bool
- statsHandlers []stats.Handler
+ statsHandler stats.Handler
initialWindowSize int32
@@ -335,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
keepaliveDone: make(chan struct{}),
- framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
+ framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
activeStreams: make(map[uint32]*ClientStream),
isSecure: isSecure,
perRPCCreds: perRPCCreds,
kp: kp,
- statsHandlers: opts.StatsHandlers,
+ statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...),
initialWindowSize: initialWindowSize,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
@@ -369,7 +370,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
})
t.logger = prefixLoggerForClientTransport(t)
// Add peer information to the http2client context.
- t.ctx = peer.NewContext(t.ctx, t.getPeer())
+ t.ctx = peer.NewContext(t.ctx, t.Peer())
if md, ok := addr.Metadata.(*metadata.MD); ok {
t.md = *md
@@ -386,15 +387,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
updateFlowControl: t.updateFlowControl,
}
}
- for _, sh := range t.statsHandlers {
- t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
+ if t.statsHandler != nil {
+ t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
- connBegin := &stats.ConnBegin{
+ t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{
Client: true,
- }
- sh.HandleConn(t.ctx, connBegin)
+ })
}
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
@@ -481,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
s := &ClientStream{
- Stream: &Stream{
+ Stream: Stream{
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
contentSubtype: callHdr.ContentSubtype,
},
ct: t,
@@ -492,31 +491,26 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
headerChan: make(chan struct{}),
doneFunc: callHdr.DoneFunc,
}
- s.wq = newWriteQuota(defaultWriteQuota, s.done)
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
+ s.Stream.buf.init()
+ s.Stream.wq.init(defaultWriteQuota, s.done)
+ s.readRequester = s
// The client side stream context should have exactly the same life cycle with the user provided context.
// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
// So we use the original context here instead of creating a copy.
s.ctx = ctx
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctx.Done(),
- recv: s.buf,
- closeStream: func(err error) {
- s.Close(err)
- },
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
+ s.trReader = transportReader{
+ reader: recvBufferReader{
+ ctx: s.ctx,
+ ctxDone: s.ctx.Done(),
+ recv: &s.buf,
+ clientStream: s,
},
+ windowHandler: s,
}
return s
}
-func (t *http2Client) getPeer() *peer.Peer {
+func (t *http2Client) Peer() *peer.Peer {
return &peer.Peer{
Addr: t.remoteAddr,
AuthInfo: t.authInfo, // Can be nil
@@ -556,6 +550,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
// Make the slice of certain predictable size to reduce allocations made by append.
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
hfLen += len(authData) + len(callAuthData)
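+ // A per-call list of accepted compressors, when set, overrides the
+ // compressors registered on the transport.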
+ registeredCompressors := t.registeredCompressors
+ if callHdr.AcceptedCompressors != nil {
+ registeredCompressors = *callHdr.AcceptedCompressors
+ }
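+ // Account for the optional headers below so the slice allocated with
+ // hfLen does not need to grow.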
+ if callHdr.PreviousAttempts > 0 {
+ hfLen++
+ }
+ if callHdr.SendCompress != "" {
+ hfLen++
+ }
+ if registeredCompressors != "" {
+ hfLen++
+ }
+ if _, ok := ctx.Deadline(); ok {
+ hfLen++
+ }
headerFields := make([]hpack.HeaderField, 0, hfLen)
headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
@@ -568,7 +578,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
}
- registeredCompressors := t.registeredCompressors
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
// Include the outgoing compressor name when compressor is not registered
@@ -736,7 +745,7 @@ func (e NewStreamError) Error() string {
// NewStream creates a stream and registers it into the transport as "active"
// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
- ctx = peer.NewContext(ctx, t.getPeer())
+ ctx = peer.NewContext(ctx, t.Peer())
// ServerName field of the resolver returned address takes precedence over
// Host field of CallHdr to determine the :authority header. This is because,
@@ -811,7 +820,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil
},
onOrphaned: cleanup,
- wq: s.wq,
+ wq: &s.wq,
}
firstTry := true
var ch chan struct{}
@@ -842,7 +851,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
transportDrainRequired = t.nextID > MaxStreamID
s.id = hdr.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+ s.fc = inFlow{limit: uint32(t.initialWindowSize)}
t.activeStreams[s.id] = s
t.mu.Unlock()
@@ -893,27 +902,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
- if len(t.statsHandlers) != 0 {
+ if t.statsHandler != nil {
header, ok := metadata.FromOutgoingContext(ctx)
if ok {
header.Set("user-agent", t.userAgent)
} else {
header = metadata.Pairs("user-agent", t.userAgent)
}
- for _, sh := range t.statsHandlers {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- // Note: Creating a new stats object to prevent pollution.
- outHeader := &stats.OutHeader{
- Client: true,
- FullMethod: callHdr.Method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: callHdr.SendCompress,
- Header: header,
- }
- sh.HandleRPC(s.ctx, outHeader)
- }
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{
+ Client: true,
+ FullMethod: callHdr.Method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: callHdr.SendCompress,
+ Header: header,
+ })
}
if transportDrainRequired {
if t.logger.V(logLevel) {
@@ -990,6 +995,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode
// accessed anymore.
func (t *http2Client) Close(err error) {
t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
+ // For background on the deadline value chosen here, see
+ // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 .
+ t.conn.SetReadDeadline(time.Now().Add(time.Second))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1051,11 +1059,10 @@ func (t *http2Client) Close(err error) {
for _, s := range streams {
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
- for _, sh := range t.statsHandlers {
- connEnd := &stats.ConnEnd{
+ if t.statsHandler != nil {
+ t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{
Client: true,
- }
- sh.HandleConn(t.ctx, connEnd)
+ })
}
}
@@ -1166,7 +1173,7 @@ func (t *http2Client) updateFlowControl(n uint32) {
})
}
-func (t *http2Client) handleData(f *http2.DataFrame) {
+func (t *http2Client) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -1210,22 +1217,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
return
}
+ dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+ if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+ if dataLen > 0 {
+ f.data.Ref()
+ s.write(recvMsg{buffer: f.data})
}
}
// The server has closed the stream without sending trailers. Record that
@@ -1465,17 +1465,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
contentTypeErr = "malformed header: missing HTTP content-type"
grpcMessage string
recvCompress string
- httpStatusCode *int
httpStatusErr string
- rawStatusCode = codes.Unknown
+ // the code from the grpc-status header, if present
+ grpcStatusCode = codes.Unknown
// headerError is set if an error is encountered while parsing the headers
headerError string
+ httpStatus string
)
- if initialHeader {
- httpStatusErr = "malformed header: missing HTTP status"
- }
-
for _, hf := range frame.Fields {
switch hf.Name {
case "content-type":
@@ -1491,35 +1488,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
case "grpc-status":
code, err := strconv.ParseInt(hf.Value, 10, 32)
if err != nil {
- se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+ se := status.New(codes.Unknown, fmt.Sprintf("transport: malformed grpc-status: %v", err))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
- rawStatusCode = codes.Code(uint32(code))
+ grpcStatusCode = codes.Code(uint32(code))
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
case ":status":
- if hf.Value == "200" {
- httpStatusErr = ""
- statusCode := 200
- httpStatusCode = &statusCode
- break
- }
-
- c, err := strconv.ParseInt(hf.Value, 10, 32)
- if err != nil {
- se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
- t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
- return
- }
- statusCode := int(c)
- httpStatusCode = &statusCode
-
- httpStatusErr = fmt.Sprintf(
- "unexpected HTTP status code received from server: %d (%s)",
- statusCode,
- http.StatusText(statusCode),
- )
+ httpStatus = hf.Value
default:
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
break
@@ -1534,25 +1511,52 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
- if !isGRPC || httpStatusErr != "" {
- var code = codes.Internal // when header does not include HTTP status, return INTERNAL
-
- if httpStatusCode != nil {
+ // If a non-gRPC response is received, evaluate the HTTP status to process
+ // the response and close the stream.
+ // If the HTTP status doesn't provide any error information (status 200),
+ // the response code is evaluated as Unknown.
+ if !isGRPC {
+ var grpcErrorCode = codes.Internal
+ if httpStatus == "" {
+ httpStatusErr = "malformed header: missing HTTP status"
+ } else {
+ // Parse the status code (e.g. "200", "404").
+ statusCode, err := strconv.Atoi(httpStatus)
+ if err != nil {
+ se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ if statusCode >= 100 && statusCode < 200 {
+ if endStream {
+ se := status.New(codes.Internal, fmt.Sprintf(
+ "protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ }
+ // In case of informational headers, return.
+ return
+ }
+ httpStatusErr = fmt.Sprintf(
+ "unexpected HTTP status code received from server: %d (%s)",
+ statusCode,
+ http.StatusText(statusCode),
+ )
var ok bool
- code, ok = HTTPStatusConvTab[*httpStatusCode]
+ grpcErrorCode, ok = HTTPStatusConvTab[statusCode]
if !ok {
- code = codes.Unknown
+ grpcErrorCode = codes.Unknown
}
}
var errs []string
if httpStatusErr != "" {
errs = append(errs, httpStatusErr)
}
+
if contentTypeErr != "" {
errs = append(errs, contentTypeErr)
}
- // Verify the HTTP response is a 200.
- se := status.New(code, strings.Join(errs, "; "))
+
+ se := status.New(grpcErrorCode, strings.Join(errs, "; "))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
@@ -1583,22 +1587,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
- for _, sh := range t.statsHandlers {
+ if t.statsHandler != nil {
if !endStream {
- inHeader := &stats.InHeader{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{
Client: true,
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
+ })
} else {
- inTrailer := &stats.InTrailer{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
Trailer: metadata.MD(mdata).Copy(),
- }
- sh.HandleRPC(s.ctx, inTrailer)
+ })
}
}
@@ -1606,7 +1608,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
- status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+ status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
@@ -1653,7 +1655,7 @@ func (t *http2Client) reader(errCh chan<- error) {
// loop to keep reading incoming messages on this transport.
for {
t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
+ frame, err := t.framer.readFrame()
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
@@ -1668,7 +1670,7 @@ func (t *http2Client) reader(errCh chan<- error) {
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
- errorDetail := t.framer.fr.ErrorDetail()
+ errorDetail := t.framer.errorDetail()
var msg string
if errorDetail != nil {
msg = errorDetail.Error()
@@ -1686,8 +1688,9 @@ func (t *http2Client) reader(errCh chan<- error) {
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
t.operateHeaders(frame)
- case *http2.DataFrame:
+ case *parsedDataFrame:
t.handleData(frame)
+ frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
@@ -1807,8 +1810,6 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
}
}
-func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
-
func (t *http2Client) incrMsgSent() {
if channelz.IsOn() {
t.channelz.SocketMetrics.MessagesSent.Add(1)
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 9f725e15..6f78a6b0 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -35,6 +35,8 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/protobuf/proto"
+
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
@@ -42,7 +44,6 @@ import (
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/mem"
- "google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@@ -86,7 +87,7 @@ type http2Server struct {
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
- stats []stats.Handler
+ stats stats.Handler
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
@@ -168,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize
}
- framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
+ framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool)
// Send initial settings as connection preface to client.
isettings := []http2.Setting{{
ID: http2.SettingMaxFrameSize,
@@ -260,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
activeStreams: make(map[uint32]*ServerStream),
- stats: config.StatsHandlers,
+ stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
@@ -390,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
}
t.maxStreamID = streamID
- buf := newRecvBuffer()
s := &ServerStream{
- Stream: &Stream{
- id: streamID,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ Stream: Stream{
+ id: streamID,
+ fc: inFlow{limit: uint32(t.initialWindowSize)},
},
st: t,
headerWireLength: int(frame.Header().Length),
}
+ s.Stream.buf.init()
var (
// if false, content-type was missing or invalid
isGRPC = false
@@ -640,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
t.channelz.SocketMetrics.StreamsStarted.Add(1)
t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
+ s.readRequester = s
s.ctxDone = s.ctx.Done()
- s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
- s.trReader = &transportReader{
- reader: &recvBufferReader{
+ s.Stream.wq.init(defaultWriteQuota, s.ctxDone)
+ s.trReader = transportReader{
+ reader: recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
- recv: s.buf,
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
+ recv: &s.buf,
},
+ windowHandler: s,
}
// Register the stream with loopy.
t.controlBuf.put(&registerStream{
streamID: s.id,
- wq: s.wq,
+ wq: &s.wq,
})
handle(s)
return nil
@@ -674,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
}()
for {
t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
+ frame, err := t.framer.readFrame()
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
@@ -711,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
})
continue
}
- case *http2.DataFrame:
+ case *parsedDataFrame:
t.handleData(frame)
+ frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
@@ -792,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) {
}
-func (t *http2Server) handleData(f *http2.DataFrame) {
+func (t *http2Server) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -837,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
return
}
+ dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+ if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+ if dataLen > 0 {
+ f.data.Ref()
+ s.write(recvMsg{buffer: f.data})
}
}
if f.StreamEnded() {
@@ -1059,14 +1049,13 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
- for _, sh := range t.stats {
+ if t.stats != nil {
// Note: Headers are compressed with hpack after this call returns.
// No WireLength field is set here.
- outHeader := &stats.OutHeader{
+ t.stats.HandleRPC(s.Context(), &stats.OutHeader{
Header: s.header.Copy(),
Compression: s.sendCompress,
- }
- sh.HandleRPC(s.Context(), outHeader)
+ })
}
return nil
}
@@ -1134,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
- for _, sh := range t.stats {
+ if t.stats != nil {
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
+ t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
@@ -1305,7 +1294,8 @@ func (t *http2Server) Close(err error) {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
t.mu.Lock()
- if _, ok := t.activeStreams[s.id]; ok {
+ _, isActive := t.activeStreams[s.id]
+ if isActive {
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
@@ -1313,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
}
t.mu.Unlock()
- if channelz.IsOn() {
+ if isActive && channelz.IsOn() {
if eosReceived {
t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {
@@ -1353,10 +1343,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo
// called to interrupt the potential blocking on other goroutines.
s.cancel()
- oldState := s.swapState(streamDone)
- if oldState == streamDone {
- return
- }
+ // We can't return early even if the stream's state is "done" as the state
+ // might have been set by the `finishStream` method. Deleting the stream via
+ // `finishStream` can get blocked on flow control.
+ s.swapState(streamDone)
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index e3663f87..5bbb641a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -25,7 +25,6 @@ import (
"fmt"
"io"
"math"
- "net"
"net/http"
"net/url"
"strconv"
@@ -37,6 +36,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
)
const (
@@ -300,11 +300,11 @@ type bufWriter struct {
buf []byte
offset int
batchSize int
- conn net.Conn
+ conn io.Writer
err error
}
-func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
+func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter {
w := &bufWriter{
batchSize: batchSize,
conn: conn,
@@ -388,15 +388,29 @@ func toIOError(err error) error {
return ioError{error: err}
}
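+// parsedDataFrame is an HTTP/2 DATA frame whose payload has already been read
+// into a mem.Buffer. The framer reuses a single parsedDataFrame across
+// readFrame calls to avoid a heap allocation per frame.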
+type parsedDataFrame struct {
+ http2.FrameHeader
+ data mem.Buffer
+}
+
+func (df *parsedDataFrame) StreamEnded() bool {
+ return df.FrameHeader.Flags.Has(http2.FlagDataEndStream)
+}
+
type framer struct {
- writer *bufWriter
- fr *http2.Framer
+ writer *bufWriter
+ fr *http2.Framer
+ headerBuf []byte // cached slice for framer headers to reduce heap allocs.
+ reader io.Reader
+ dataFrame parsedDataFrame // Cached data frame to avoid heap allocations.
+ pool mem.BufferPool
+ errDetail error
}
var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
+func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer {
if writeBufferSize < 0 {
writeBufferSize = 0
}
@@ -412,6 +426,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
f := &framer{
writer: w,
fr: http2.NewFramer(w, r),
+ reader: r,
+ pool: memPool,
}
f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
// Opt-in to Frame reuse API on framer to reduce garbage.
@@ -422,6 +438,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
return f
}
+// writeData writes a DATA frame.
+//
+// It is the caller's responsibility not to violate the maximum frame size.
+func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error {
+ var flags http2.Flags
+ if endStream {
+ flags = http2.FlagDataEndStream
+ }
+ length := uint32(0)
+ for _, d := range data {
+ length += uint32(len(d))
+ }
+ // TODO: Replace the header write with the framer API being added in
+ // https://github.com/golang/go/issues/66655.
+ f.headerBuf = append(f.headerBuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length),
+ byte(http2.FrameData),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+ if _, err := f.writer.Write(f.headerBuf); err != nil {
+ return err
+ }
+ for _, d := range data {
+ if _, err := f.writer.Write(d); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// readFrame reads a single frame. The returned Frame is only valid
+// until the next call to readFrame.
+func (f *framer) readFrame() (any, error) {
+ f.errDetail = nil
+ fh, err := f.fr.ReadFrameHeader()
+ if err != nil {
+ f.errDetail = f.fr.ErrorDetail()
+ return nil, err
+ }
+ // Read the data frame directly from the underlying io.Reader to avoid
+ // copies.
+ if fh.Type == http2.FrameData {
+ err = f.readDataFrame(fh)
+ return &f.dataFrame, err
+ }
+ fr, err := f.fr.ReadFrameForHeader(fh)
+ if err != nil {
+ f.errDetail = f.fr.ErrorDetail()
+ return nil, err
+ }
+ return fr, err
+}
+
+// errorDetail returns a more detailed error of the last error
+// returned by framer.readFrame. For instance, if readFrame
+// returns a StreamError with code PROTOCOL_ERROR, errorDetail
+// will say exactly what was invalid. errorDetail is not guaranteed
+// to return a non-nil value.
+// errorDetail is reset after the next call to readFrame.
+func (f *framer) errorDetail() error {
+ return f.errDetail
+}
+
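+// readDataFrame reads the payload of the DATA frame described by fh directly
+// from the underlying reader into f.dataFrame, bypassing the http2.Framer so
+// the payload lands in a pooled buffer (or a plain slice for small frames).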
+func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ f.errDetail = errors.New("DATA frame with stream ID 0")
+ return http2.ConnectionError(http2.ErrCodeProtocol)
+ }
+ // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This
+ // conversion is performed by mem.NewBuffer. To avoid the extra allocation,
+ // a []byte is allocated directly if required and cast to a mem.SliceBuffer.
+ var buf []byte
+ // poolHandle is the pointer returned by the buffer pool (if it's used).
+ var poolHandle *[]byte
+ useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length))
+ if useBufferPool {
+ poolHandle = f.pool.Get(int(fh.Length))
+ buf = *poolHandle
+ defer func() {
+ if err != nil {
+ f.pool.Put(poolHandle)
+ }
+ }()
+ } else {
+ buf = make([]byte, int(fh.Length))
+ }
+ if fh.Flags.Has(http2.FlagDataPadded) {
+ if fh.Length == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ // This initial 1-byte read can be inefficient for unbuffered readers,
+ // but it allows the rest of the payload to be read directly to the
+ // start of the destination slice. This makes it easy to return the
+ // original slice back to the buffer pool.
+ if _, err := io.ReadFull(f.reader, buf[:1]); err != nil {
+ return err
+ }
+ padSize := buf[0]
+ buf = buf[:len(buf)-1]
+ if int(padSize) > len(buf) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ f.errDetail = errors.New("pad size larger than data payload")
+ return http2.ConnectionError(http2.ErrCodeProtocol)
+ }
+ if _, err := io.ReadFull(f.reader, buf); err != nil {
+ return err
+ }
+ buf = buf[:len(buf)-int(padSize)]
+ } else if _, err := io.ReadFull(f.reader, buf); err != nil {
+ return err
+ }
+
+ f.dataFrame.FrameHeader = fh
+ if useBufferPool {
+ // Update the handle to point to the (potentially re-sliced) buf.
+ *poolHandle = buf
+ f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool)
+ } else {
+ f.dataFrame.data = mem.SliceBuffer(buf)
+ }
+ return nil
+}
+
+func (df *parsedDataFrame) Header() http2.FrameHeader {
+ return df.FrameHeader
+}
+
func getWriteBufferPool(size int) *sync.Pool {
writeBufferMutex.Lock()
defer writeBufferMutex.Unlock()
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
index cf8da0b5..ed6a13b7 100644
--- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -32,7 +32,7 @@ import (
// ServerStream implements streaming functionality for a gRPC server.
type ServerStream struct {
- *Stream // Embed for common stream functionality.
+ Stream // Embed for common stream functionality.
st internalServerTransport
ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
@@ -43,12 +43,13 @@ type ServerStream struct {
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client.
clientAdvertisedCompressors string
- headerWireLength int
// hdrMu protects outgoing header and trailer metadata.
hdrMu sync.Mutex
header metadata.MD // the outgoing header metadata. Updated by WriteHeader.
headerSent atomic.Bool // atomically set when the headers are sent out.
+
+ headerWireLength int
}
// Read reads an n byte message from the input stream.
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error {
s.hdrMu.Unlock()
return nil
}
+
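+// requestRead states the application's intention to read n bytes from the
+// stream so the transport can adjust flow control; it implements the
+// readRequester interface.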
+func (s *ServerStream) requestRead(n int) {
+ s.st.adjustWindow(s, uint32(n))
+}
+
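+// updateWindow reports that n bytes were consumed by the application so the
+// transport can send window updates; it implements the windowHandler
+// interface.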
+func (s *ServerStream) updateWindow(n int) {
+ s.st.updateWindow(s, uint32(n))
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 7dd53e80..6daf1e00 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -68,11 +68,11 @@ type recvBuffer struct {
err error
}
-func newRecvBuffer() *recvBuffer {
- b := &recvBuffer{
- c: make(chan recvMsg, 1),
- }
- return b
+// init allows a recvBuffer to be initialized in-place, which is useful
+// for resetting a buffer or for avoiding a heap allocation when the buffer
+// is embedded in another struct.
+func (b *recvBuffer) init() {
+ b.c = make(chan recvMsg, 1)
}
func (b *recvBuffer) put(r recvMsg) {
@@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg {
// recvBufferReader implements io.Reader interface to read the data from
// recvBuffer.
type recvBufferReader struct {
- closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
- ctx context.Context
- ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
- recv *recvBuffer
- last mem.Buffer // Stores the remaining data in the previous calls.
- err error
+ _ noCopy
+ clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata.
+ ctx context.Context
+ ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
+ recv *recvBuffer
+ last mem.Buffer // Stores the remaining data in the previous calls.
+ err error
}
func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
@@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
n, r.last = mem.ReadUnsafe(header, r.last)
return n, nil
}
- if r.closeStream != nil {
+ if r.clientStream != nil {
n, r.err = r.readMessageHeaderClient(header)
} else {
n, r.err = r.readMessageHeader(header)
@@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
}
return buf, nil
}
- if r.closeStream != nil {
+ if r.clientStream != nil {
buf, r.err = r.readClient(n)
} else {
buf, r.err = r.read(n)
@@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er
// TODO: delaying ctx error seems like a unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
- r.closeStream(ContextErr(r.ctx.Err()))
+ r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readMessageHeaderAdditional(m, header)
case m := <-r.recv.get():
@@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// TODO: delaying ctx error seems like a unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
- r.closeStream(ContextErr(r.ctx.Err()))
+ r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, n)
case m := <-r.recv.get():
@@ -285,27 +286,32 @@ const (
// Stream represents an RPC in the transport layer.
type Stream struct {
- id uint32
ctx context.Context // the associated context of the stream
method string // the associated RPC method of the stream
recvCompress string
sendCompress string
- buf *recvBuffer
- trReader *transportReader
- fc *inFlow
- wq *writeQuota
-
- // Callback to state application's intentions to read data. This
- // is used to adjust flow control, if needed.
- requestRead func(int)
- state streamState
+ readRequester readRequester
// contentSubtype is the content-subtype for requests.
// this must be lowercase or the behavior is undefined.
contentSubtype string
trailer metadata.MD // the key-value map of trailer metadata.
+
+ // Non-pointer fields are at the end to optimize GC performance.
+ state streamState
+ id uint32
+ buf recvBuffer
+ trReader transportReader
+ fc inFlow
+ wq writeQuota
+}
+
+// readRequester is used to state the application's intention to read data.
+// This is used to adjust flow control, if needed.
+type readRequester interface {
+ requestRead(int)
}
func (s *Stream) swapState(st streamState) streamState {
@@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) {
if er := s.trReader.er; er != nil {
return er
}
- s.requestRead(len(header))
+ s.readRequester.requestRead(len(header))
for len(header) != 0 {
n, err := s.trReader.ReadMessageHeader(header)
header = header[n:]
@@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
if er := s.trReader.er; er != nil {
return nil, er
}
- s.requestRead(n)
+ s.readRequester.requestRead(n)
for n != 0 {
buf, err := s.trReader.Read(n)
var bufLen int
@@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
return data, nil
}
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct {
+}
+
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader *recvBufferReader
+ _ noCopy
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
- windowHandler func(int)
+ windowHandler windowHandler
er error
+ reader recvBufferReader
+}
+
+// The handler to control the window update procedure for both this
+// particular stream and the associated transport.
+type windowHandler interface {
+ updateWindow(int)
}
func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
@@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
t.er = err
return 0, err
}
- t.windowHandler(n)
+ t.windowHandler.updateWindow(n)
return n, nil
}
@@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
t.er = err
return buf, err
}
- t.windowHandler(buf.Len())
+ t.windowHandler.updateWindow(buf.Len())
return buf, nil
}
@@ -454,7 +478,7 @@ type ServerConfig struct {
ConnectionTimeout time.Duration
Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
- StatsHandlers []stats.Handler
+ StatsHandler stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
@@ -529,6 +553,12 @@ type CallHdr struct {
// outbound message.
SendCompress string
+ // AcceptedCompressors overrides the grpc-accept-encoding header for this
+ // call. When nil, the transport advertises the default set of registered
+ // compressors. A non-nil pointer overrides that value (including the empty
+ // string to advertise none).
+ AcceptedCompressors *string
+
// Creds specifies credentials.PerRPCCredentials for a call.
Creds credentials.PerRPCCredentials
@@ -584,8 +614,9 @@ type ClientTransport interface {
// with a human readable string with debug info.
GetGoAwayReason() (GoAwayReason, string)
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
+ // Peer returns information about the peer associated with the Transport.
+ // The returned information includes authentication and network address details.
+ Peer() *peer.Peer
}
// ServerTransport is the common interface for all gRPC server-side transport
@@ -615,6 +646,8 @@ type internalServerTransport interface {
write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
writeStatus(s *ServerStream, st *status.Status) error
incrMsgRecv()
+ adjustWindow(s *ServerStream, n uint32)
+ updateWindow(s *ServerStream, n uint32)
}
// connectionErrorf creates an ConnectionError with the specified error description.
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
index c37c58c0..e37afdd1 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_pool.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -32,12 +32,17 @@ type BufferPool interface {
Get(length int) *[]byte
// Put returns a buffer to the pool.
+ //
+ // The provided pointer must hold a prefix of the buffer obtained via
+ // BufferPool.Get to ensure the buffer's entire capacity can be re-used.
Put(*[]byte)
}
+const goPageSize = 4 << 10 // 4KiB. N.B. this must be a power of 2.
+
var defaultBufferPoolSizes = []int{
256,
- 4 << 10, // 4KB (go page size)
+ goPageSize,
16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
32 << 10, // 32KB (default buffer size for io.Copy)
1 << 20, // 1MB
@@ -118,7 +123,11 @@ type sizedBufferPool struct {
}
func (p *sizedBufferPool) Get(size int) *[]byte {
- buf := p.pool.Get().(*[]byte)
+ buf, ok := p.pool.Get().(*[]byte)
+ if !ok {
+ buf := make([]byte, size, p.defaultSize)
+ return &buf
+ }
b := *buf
clear(b[:cap(b)])
*buf = b[:size]
@@ -137,12 +146,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) {
func newSizedBufferPool(size int) *sizedBufferPool {
return &sizedBufferPool{
- pool: sync.Pool{
- New: func() any {
- buf := make([]byte, size)
- return &buf
- },
- },
defaultSize: size,
}
}
@@ -160,6 +163,7 @@ type simpleBufferPool struct {
func (p *simpleBufferPool) Get(size int) *[]byte {
bs, ok := p.pool.Get().(*[]byte)
if ok && cap(*bs) >= size {
+ clear((*bs)[:cap(*bs)])
*bs = (*bs)[:size]
return bs
}
@@ -170,7 +174,14 @@ func (p *simpleBufferPool) Get(size int) *[]byte {
p.pool.Put(bs)
}
- b := make([]byte, size)
+ // If we're going to allocate, round up to the nearest page. This way, if
+ // requests frequently arrive with slightly increasing sizes, we avoid
+ // allocating repeatedly just because we got unlucky. By default we only
+ // allocate here when size > 1MiB. Because goPageSize is a power of 2, we
+ // can round up efficiently.
+ allocSize := (size + goPageSize - 1) & ^(goPageSize - 1)
+
+ b := make([]byte, size, allocSize)
return &b
}
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
index af510d20..084fb19c 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_slice.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -19,6 +19,7 @@
package mem
import (
+ "fmt"
"io"
)
@@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
// Reader returns a new Reader for the input slice after taking references to
// each underlying buffer.
-func (s BufferSlice) Reader() Reader {
+func (s BufferSlice) Reader() *Reader {
s.Ref()
- return &sliceReader{
+ return &Reader{
data: s,
len: s.Len(),
}
}
// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
-// with other parts systems. It also provides an additional convenience method
-// Remaining(), which returns the number of unread bytes remaining in the slice.
+// with other systems.
+//
// Buffers will be freed as they are read.
-type Reader interface {
- io.Reader
- io.ByteReader
- // Close frees the underlying BufferSlice and never returns an error. Subsequent
- // calls to Read will return (0, io.EOF).
- Close() error
- // Remaining returns the number of unread bytes remaining in the slice.
- Remaining() int
- // Reset frees the currently held buffer slice and starts reading from the
- // provided slice. This allows reusing the reader object.
- Reset(s BufferSlice)
-}
-
-type sliceReader struct {
+//
+// A Reader can be constructed from a BufferSlice; alternatively the zero value
+// of a Reader may be used after calling Reset on it.
+type Reader struct {
data BufferSlice
len int
// The index into data[0].ReadOnlyData().
bufferIdx int
}
-func (r *sliceReader) Remaining() int {
+// Remaining returns the number of unread bytes remaining in the slice.
+func (r *Reader) Remaining() int {
return r.len
}
-func (r *sliceReader) Reset(s BufferSlice) {
+// Reset frees the currently held buffer slice and starts reading from the
+// provided slice. This allows reusing the reader object.
+func (r *Reader) Reset(s BufferSlice) {
r.data.Free()
s.Ref()
r.data = s
@@ -161,14 +155,16 @@ func (r *sliceReader) Reset(s BufferSlice) {
r.bufferIdx = 0
}
-func (r *sliceReader) Close() error {
+// Close frees the underlying BufferSlice and never returns an error. Subsequent
+// calls to Read will return (0, io.EOF).
+func (r *Reader) Close() error {
r.data.Free()
r.data = nil
r.len = 0
return nil
}
-func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+func (r *Reader) freeFirstBufferIfEmpty() bool {
if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
return false
}
@@ -179,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool {
return true
}
-func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+func (r *Reader) Read(buf []byte) (n int, _ error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -202,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) {
return n, nil
}
-func (r *sliceReader) ReadByte() (byte, error) {
+// ReadByte reads a single byte.
+func (r *Reader) ReadByte() (byte, error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -290,3 +287,59 @@ nextBuffer:
}
}
}
+
+// Discard skips the next n bytes, returning the number of bytes discarded.
+//
+// It frees buffers as they are fully consumed.
+//
+// If Discard skips fewer than n bytes, it also returns an error.
+func (r *Reader) Discard(n int) (discarded int, err error) {
+ total := n
+ for n > 0 && r.len > 0 {
+ curData := r.data[0].ReadOnlyData()
+ curSize := min(n, len(curData)-r.bufferIdx)
+ n -= curSize
+ r.len -= curSize
+ r.bufferIdx += curSize
+ if r.bufferIdx >= len(curData) {
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ }
+ }
+ discarded = total - n
+ if n > 0 {
+ return discarded, fmt.Errorf("insufficient bytes in reader")
+ }
+ return discarded, nil
+}
+
+// Peek returns the next n bytes without advancing the reader.
+//
+// Peek appends results to the provided res slice and returns the updated slice.
+// This pattern allows re-using the storage of res if it has sufficient
+// capacity.
+//
+// The returned subslices are views into the underlying buffers and are only
+// valid until the reader is advanced past the corresponding buffer.
+//
+// If Peek returns fewer than n bytes, it also returns an error.
+func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) {
+ for i := 0; n > 0 && i < len(r.data); i++ {
+ curData := r.data[i].ReadOnlyData()
+ start := 0
+ if i == 0 {
+ start = r.bufferIdx
+ }
+ curSize := min(n, len(curData)-start)
+ if curSize == 0 {
+ continue
+ }
+ res = append(res, curData[start:start+curSize])
+ n -= curSize
+ }
+ if n > 0 {
+ return nil, fmt.Errorf("insufficient bytes in reader")
+ }
+ return res, nil
+}
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index ee0ff969..1e783feb 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
}
// check if the context has the relevant information to prepareMsg
- if rpcInfo.preloaderInfo == nil {
- return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
- }
if rpcInfo.preloaderInfo.codec == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index 80e16a32..6e613764 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -69,6 +69,7 @@ func (ccr *ccResolverWrapper) start() error {
errCh := make(chan error)
ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil {
+ errCh <- ctx.Err()
return
}
opts := resolver.BuildOptions{
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 47ea09f5..8160f943 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -33,6 +33,8 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
@@ -41,6 +43,10 @@ import (
"google.golang.org/grpc/status"
)
+func init() {
+ internal.AcceptCompressors = acceptCompressors
+}
+
// Compressor defines the interface gRPC uses to compress a message.
//
// Deprecated: use package encoding.
@@ -151,16 +157,32 @@ func (d *gzipDecompressor) Type() string {
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
- compressorName string
- failFast bool
- maxReceiveMessageSize *int
- maxSendMessageSize *int
- creds credentials.PerRPCCredentials
- contentSubtype string
- codec baseCodec
- maxRetryRPCBufferSize int
- onFinish []func(err error)
- authority string
+ compressorName string
+ failFast bool
+ maxReceiveMessageSize *int
+ maxSendMessageSize *int
+ creds credentials.PerRPCCredentials
+ contentSubtype string
+ codec baseCodec
+ maxRetryRPCBufferSize int
+ onFinish []func(err error)
+ authority string
+ acceptedResponseCompressors []string
+}
+
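+// acceptedCompressorAllows reports whether the compressor named name may be
+// used for a response given the allowed list. A nil list allows every
+// compressor; the empty name and the identity encoding are always allowed.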
+func acceptedCompressorAllows(allowed []string, name string) bool {
+ if allowed == nil {
+ return true
+ }
+ if name == "" || name == encoding.Identity {
+ return true
+ }
+ for _, a := range allowed {
+ if a == name {
+ return true
+ }
+ }
+ return false
}
func defaultCallInfo() *callInfo {
@@ -170,6 +192,29 @@ func defaultCallInfo() *callInfo {
}
}
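+// newAcceptedCompressionConfig validates and normalizes the compressor names
+// passed to the acceptCompressors call option: it trims whitespace, skips
+// empty and identity entries, rejects unregistered compressors, and
+// de-duplicates the rest.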
+func newAcceptedCompressionConfig(names []string) ([]string, error) {
+ if len(names) == 0 {
+ return nil, nil
+ }
+ var allowed []string
+ seen := make(map[string]struct{}, len(names))
+ for _, name := range names {
+ name = strings.TrimSpace(name)
+ if name == "" || name == encoding.Identity {
+ continue
+ }
+ if !grpcutil.IsCompressorNameRegistered(name) {
+ return nil, status.Errorf(codes.InvalidArgument, "grpc: compressor %q is not registered", name)
+ }
+ if _, dup := seen[name]; dup {
+ continue
+ }
+ seen[name] = struct{}{}
+ allowed = append(allowed, name)
+ }
+ return allowed, nil
+}
+
// CallOption configures a Call before it starts or extracts information from
// a Call after it completes.
type CallOption interface {
@@ -471,6 +516,31 @@ func (o CompressorCallOption) before(c *callInfo) error {
}
func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
+// acceptCompressors returns a CallOption that limits the compression algorithms
+// advertised in the grpc-accept-encoding header for response messages.
+// Compression algorithms not in the provided list will not be advertised, and
+// responses compressed with non-listed algorithms will be rejected.
+func acceptCompressors(names ...string) CallOption {
+ cp := append([]string(nil), names...)
+ return acceptCompressorsCallOption{names: cp}
+}
+
+// acceptCompressorsCallOption is a CallOption that limits response compression.
+type acceptCompressorsCallOption struct {
+ names []string
+}
+
+func (o acceptCompressorsCallOption) before(c *callInfo) error {
+ allowed, err := newAcceptedCompressionConfig(o.names)
+ if err != nil {
+ return err
+ }
+ c.acceptedResponseCompressors = allowed
+ return nil
+}
+
+func (acceptCompressorsCallOption) after(*callInfo, *csAttempt) {}
+
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
// the wire will be "application/grpc+json". The content-subtype is converted
@@ -657,8 +727,20 @@ type streamReader interface {
Read(n int) (mem.BufferSlice, error)
}
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct {
+}
+
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
+ _ noCopy
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
@@ -845,8 +927,7 @@ func (p *payloadInfo) free() {
// the buffer is no longer needed.
// TODO: Refactor this function to reduce the number of arguments.
// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
-func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
-) (out mem.BufferSlice, err error) {
+func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) (out mem.BufferSlice, err error) {
pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return nil, err
@@ -949,7 +1030,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR
// Information about RPC
type rpcInfo struct {
failfast bool
- preloaderInfo *compressorInfo
+ preloaderInfo compressorInfo
}
// Information about Preloader
@@ -968,7 +1049,7 @@ type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
failfast: failfast,
- preloaderInfo: &compressorInfo{
+ preloaderInfo: compressorInfo{
codec: codec,
cp: cp,
comp: comp,
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 1da2a542..ddd37734 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -124,7 +124,8 @@ type serviceInfo struct {
// Server is a gRPC server to serve RPC requests.
type Server struct {
- opts serverOptions
+ opts serverOptions
+ statsHandler stats.Handler
mu sync.Mutex // guards following
lis map[net.Listener]bool
@@ -692,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server {
o.apply(&opts)
}
s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[string]map[transport.ServerTransport]bool),
- services: make(map[string]*serviceInfo),
- quit: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- channelz: channelz.RegisterServer(""),
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ statsHandler: istats.NewCombinedHandler(opts.statsHandlers...),
+ conns: make(map[string]map[transport.ServerTransport]bool),
+ services: make(map[string]*serviceInfo),
+ quit: grpcsync.NewEvent(),
+ done: grpcsync.NewEvent(),
+ channelz: channelz.RegisterServer(""),
}
chainUnaryServerInterceptors(s)
chainStreamServerInterceptors(s)
@@ -999,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ConnectionTimeout: s.opts.connectionTimeout,
Credentials: s.opts.creds,
InTapHandle: s.opts.inTapHandle,
- StatsHandlers: s.opts.statsHandlers,
+ StatsHandler: s.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
@@ -1036,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
ctx = transport.SetConnection(ctx, rawConn)
ctx = peer.NewContext(ctx, st.Peer())
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+ if s.statsHandler != nil {
+ ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{
RemoteAddr: st.Peer().Addr,
LocalAddr: st.Peer().LocalAddr,
})
- sh.HandleConn(ctx, &stats.ConnBegin{})
+ s.statsHandler.HandleConn(ctx, &stats.ConnBegin{})
}
defer func() {
st.Close(errors.New("finished serving streams for the server transport"))
- for _, sh := range s.opts.statsHandlers {
- sh.HandleConn(ctx, &stats.ConnEnd{})
+ if s.statsHandler != nil {
+ s.statsHandler.HandleConn(ctx, &stats.ConnEnd{})
}
}()
@@ -1104,7 +1106,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
+ st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1198,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = stream.Write(hdr, payload, opts)
- if err == nil {
- if len(s.opts.statsHandlers) != 0 {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
- }
- }
+ if err == nil && s.statsHandler != nil {
+ s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
}
return err
}
@@ -1245,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
- shs := s.opts.statsHandlers
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ sh := s.statsHandler
+ if sh != nil || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
s.incrCallsStarted()
}
var statsBegin *stats.Begin
- for _, sh := range shs {
- beginTime := time.Now()
+ if sh != nil {
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
IsClientStream: false,
IsServerStream: false,
}
@@ -1282,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
trInfo.tr.Finish()
}
- for _, sh := range shs {
+ if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1379,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
}
var payInfo *payloadInfo
- if len(shs) != 0 || len(binlogs) != 0 {
+ if sh != nil || len(binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1405,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
- for _, sh := range shs {
+ if sh != nil {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
@@ -1579,33 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if channelz.IsOn() {
s.incrCallsStarted()
}
- shs := s.opts.statsHandlers
+ sh := s.statsHandler
var statsBegin *stats.Begin
- if len(shs) != 0 {
- beginTime := time.Now()
+ if sh != nil {
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams,
}
- for _, sh := range shs {
- sh.HandleRPC(ctx, statsBegin)
- }
+ sh.HandleRPC(ctx, statsBegin)
}
ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
s: stream,
- p: &parser{r: stream, bufferPool: s.opts.bufferPool},
+ p: parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
desc: sd,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
- statsHandler: shs,
+ statsHandler: sh,
}
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ if sh != nil || trInfo != nil || channelz.IsOn() {
// See comment in processUnaryRPC on defers.
defer func() {
if trInfo != nil {
@@ -1619,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
ss.mu.Unlock()
}
- if len(shs) != 0 {
+ if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1627,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
- for _, sh := range shs {
- sh.HandleRPC(ctx, end)
- }
+ sh.HandleRPC(ctx, end)
}
if channelz.IsOn() {
@@ -1818,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser
method := sm[pos+1:]
// FromIncomingContext is expensive: skip if there are no statsHandlers
- if len(s.opts.statsHandlers) > 0 {
+ if s.statsHandler != nil {
md, _ := metadata.FromIncomingContext(ctx)
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
- sh.HandleRPC(ctx, &stats.InHeader{
- FullMethod: stream.Method(),
- RemoteAddr: t.Peer().Addr,
- LocalAddr: t.Peer().LocalAddr,
- Compression: stream.RecvCompress(),
- WireLength: stream.HeaderWireLength(),
- Header: md,
- })
- }
+ ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+ s.statsHandler.HandleRPC(ctx, &stats.InHeader{
+ FullMethod: stream.Method(),
+ RemoteAddr: t.Peer().Addr,
+ LocalAddr: t.Peer().LocalAddr,
+ Compression: stream.RecvCompress(),
+ WireLength: stream.HeaderWireLength(),
+ Header: md,
+ })
}
// To have calls in stream callouts work. Will delete once all stats handler
// calls come from the gRPC layer.
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index d9bbd4c5..ec9577b2 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -25,6 +25,7 @@ import (
"math"
rand "math/rand/v2"
"strconv"
+ "strings"
"sync"
"time"
@@ -177,13 +178,43 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return cc.NewStream(ctx, desc, method, opts...)
}
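+// emptyMethodConfig is the shared default method config used for RPCs when the
+// service config does not provide one; it must never be mutated.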
+var emptyMethodConfig = serviceconfig.MethodConfig{}
+
+// endOfClientStream performs cleanup actions required for both successful and
+// failed streams. This includes incrementing channelz stats and invoking all
+// registered OnFinish call options.
+func endOfClientStream(cc *ClientConn, err error, opts ...CallOption) {
+ if channelz.IsOn() {
+ if err != nil {
+ cc.incrCallsFailed()
+ } else {
+ cc.incrCallsSucceeded()
+ }
+ }
+
+ for _, o := range opts {
+ if o, ok := o.(OnFinishCallOption); ok {
+ o.OnFinish(err)
+ }
+ }
+}
+
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+ if channelz.IsOn() {
+ cc.incrCallsStarted()
+ }
+ defer func() {
+ if err != nil {
+ // Ensure cleanup when stream creation fails.
+ endOfClientStream(cc, err, opts...)
+ }
+ }()
+
// Start tracking the RPC for idleness purposes. This is where a stream is
// created for both streaming and unary RPCs, and hence is a good place to
// track active RPC count.
- if err := cc.idlenessMgr.OnCallBegin(); err != nil {
- return nil, err
- }
+ cc.idlenessMgr.OnCallBegin()
+
// Add a calloption, to decrement the active call count, that gets executed
// when the RPC completes.
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
@@ -202,14 +233,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}
}
- if channelz.IsOn() {
- cc.incrCallsStarted()
- defer func() {
- if err != nil {
- cc.incrCallsFailed()
- }
- }()
- }
// Provide an opportunity for the first RPC to see the first service config
// provided by the resolver.
nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
@@ -217,7 +240,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return nil, err
}
- var mc serviceconfig.MethodConfig
+ mc := &emptyMethodConfig
var onCommit func()
newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
@@ -240,7 +263,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if rpcConfig.Context != nil {
ctx = rpcConfig.Context
}
- mc = rpcConfig.MethodConfig
+ mc = &rpcConfig.MethodConfig
onCommit = rpcConfig.OnCommitted
if rpcConfig.Interceptor != nil {
rpcInfo.Context = nil
@@ -258,7 +281,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return newStream(ctx, func() {})
}
-func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
callInfo := defaultCallInfo()
if mc.WaitForReady != nil {
callInfo.failFast = !*mc.WaitForReady
@@ -299,6 +322,10 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
DoneFunc: doneFunc,
Authority: callInfo.authority,
}
+ if allowed := callInfo.acceptedResponseCompressors; len(allowed) > 0 {
+ headerValue := strings.Join(allowed, ",")
+ callHdr.AcceptedCompressors = &headerValue
+ }
// Set our outgoing compression according to the UseCompressor CallOption, if
// set. In that case, also find the compressor from the encoding package.
@@ -325,7 +352,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs := &clientStream{
callHdr: callHdr,
ctx: ctx,
- methodConfig: &mc,
+ methodConfig: mc,
opts: opts,
callInfo: callInfo,
cc: cc,
@@ -418,19 +445,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
method := cs.callHdr.Method
var beginTime time.Time
- shs := cs.cc.dopts.copts.StatsHandlers
- for _, sh := range shs {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay})
+ sh := cs.cc.statsHandler
+ if sh != nil {
beginTime = time.Now()
- begin := &stats.Begin{
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{
+ FullMethodName: method, FailFast: cs.callInfo.failFast,
+ NameResolutionDelay: cs.nameResolutionDelay,
+ })
+ sh.HandleRPC(ctx, &stats.Begin{
Client: true,
BeginTime: beginTime,
FailFast: cs.callInfo.failFast,
IsClientStream: cs.desc.ClientStreams,
IsServerStream: cs.desc.ServerStreams,
IsTransparentRetryAttempt: isTransparent,
- }
- sh.HandleRPC(ctx, begin)
+ })
}
var trInfo *traceInfo
@@ -461,7 +490,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
beginTime: beginTime,
cs: cs,
decompressorV0: cs.cc.dopts.dc,
- statsHandlers: shs,
+ statsHandler: sh,
trInfo: trInfo,
}, nil
}
@@ -480,12 +509,10 @@ func (a *csAttempt) getTransport() error {
return err
}
if a.trInfo != nil {
- a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
+ a.trInfo.firstLine.SetRemoteAddr(a.transport.Peer().Addr)
}
- if pick.blocked {
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
- }
+ if pick.blocked && a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
}
return nil
}
@@ -529,7 +556,7 @@ func (a *csAttempt) newStream() error {
}
a.transportStream = s
a.ctx = s.Context()
- a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+ a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -549,6 +576,8 @@ type clientStream struct {
sentLast bool // sent an end stream
+ receivedFirstMsg bool // set after the first message is received
+
methodConfig *MethodConfig
ctx context.Context // the application's context, wrapped by stats/tracing
@@ -599,7 +628,7 @@ type csAttempt struct {
cs *clientStream
transport transport.ClientTransport
transportStream *transport.ClientStream
- parser *parser
+ parser parser
pickResult balancer.PickResult
finished bool
@@ -613,8 +642,8 @@ type csAttempt struct {
// and cleared when the finish method is called.
trInfo *traceInfo
- statsHandlers []stats.Handler
- beginTime time.Time
+ statsHandler stats.Handler
+ beginTime time.Time
// set for newStream errors that may be transparently retried
allowTransparentRetry bool
@@ -1038,9 +1067,6 @@ func (cs *clientStream) finish(err error) {
return
}
cs.finished = true
- for _, onFinish := range cs.callInfo.onFinish {
- onFinish(err)
- }
cs.commitAttemptLocked()
if cs.attempt != nil {
cs.attempt.finish(err)
@@ -1080,13 +1106,7 @@ func (cs *clientStream) finish(err error) {
if err == nil {
cs.retryThrottler.successfulRPC()
}
- if channelz.IsOn() {
- if err != nil {
- cs.cc.incrCallsFailed()
- } else {
- cs.cc.incrCallsSucceeded()
- }
- }
+ endOfClientStream(cs.cc, err, cs.opts...)
cs.cancel()
}
@@ -1108,17 +1128,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
}
return io.EOF
}
- if len(a.statsHandlers) != 0 {
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
- }
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
}
return nil
}
func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
- if len(a.statsHandlers) != 0 && payInfo == nil {
+ if a.statsHandler != nil && payInfo == nil {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1132,6 +1150,10 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
a.decompressorV0 = nil
a.decompressorV1 = encoding.GetCompressor(ct)
}
+ // Validate that the compression method is acceptable for this call.
+ if !acceptedCompressorAllows(cs.callInfo.acceptedResponseCompressors, ct) {
+ return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
+ }
} else {
// No compression is used; disable our decompressor.
a.decompressorV0 = nil
@@ -1139,16 +1161,21 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompressorSet = true
}
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
+ // Received no msg and status OK for non-server streaming rpcs.
+ if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
+ cs.receivedFirstMsg = true
if a.trInfo != nil {
a.mu.Lock()
if a.trInfo.tr != nil {
@@ -1156,8 +1183,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
a.mu.Unlock()
}
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, &stats.InPayload{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
Client: true,
RecvTime: time.Now(),
Payload: m,
@@ -1172,12 +1199,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
- return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (a *csAttempt) finish(err error) {
@@ -1210,15 +1237,14 @@ func (a *csAttempt) finish(err error) {
ServerLoad: balancerload.Parse(tr),
})
}
- for _, sh := range a.statsHandlers {
- end := &stats.End{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.End{
Client: true,
BeginTime: a.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
- }
- sh.HandleRPC(a.ctx, end)
+ })
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -1324,7 +1350,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.transportStream = s
- as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+ as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1359,6 +1385,7 @@ type addrConnStream struct {
transport transport.ClientTransport
ctx context.Context
sentLast bool
+ receivedFirstMsg bool
desc *StreamDesc
codec baseCodec
sendCompressorV0 Compressor
@@ -1366,7 +1393,7 @@ type addrConnStream struct {
decompressorSet bool
decompressorV0 Decompressor
decompressorV1 encoding.Compressor
- parser *parser
+ parser parser
// mu guards finished and is held for the entire finish method.
mu sync.Mutex
@@ -1472,6 +1499,10 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
as.decompressorV0 = nil
as.decompressorV1 = encoding.GetCompressor(ct)
}
+ // Validate that the compression method is acceptable for this call.
+ if !acceptedCompressorAllows(as.callInfo.acceptedResponseCompressors, ct) {
+ return status.Errorf(codes.Internal, "grpc: peer compressed the response with %q which is not allowed by AcceptCompressors", ct)
+ }
} else {
// No compression is used; disable our decompressor.
as.decompressorV0 = nil
@@ -1479,15 +1510,20 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompressorSet = true
}
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
+ // Received no msg and status OK for non-server streaming rpcs.
+ if !as.desc.ServerStreams && !as.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
+ as.receivedFirstMsg = true
if as.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
@@ -1496,12 +1532,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
- return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (as *addrConnStream) finish(err error) {
@@ -1584,7 +1620,7 @@ type ServerStream interface {
type serverStream struct {
ctx context.Context
s *transport.ServerStream
- p *parser
+ p parser
codec baseCodec
desc *StreamDesc
@@ -1601,7 +1637,7 @@ type serverStream struct {
maxSendMessageSize int
trInfo *traceInfo
- statsHandler []stats.Handler
+ statsHandler stats.Handler
binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1737,10 +1773,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
binlog.Log(ss.ctx, sm)
}
}
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
return nil
}
@@ -1771,11 +1805,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
}()
var payInfo *payloadInfo
- if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+ if ss.statsHandler != nil || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1795,16 +1829,14 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
return toRPCErr(err)
}
ss.recvFirstMsg = true
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- Length: payInfo.uncompressedBytes.Len(),
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- })
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ })
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
@@ -1821,7 +1853,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
// Special handling for non-client-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
return nil
} else if err != nil {
return err
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index bc1eb290..ff7840fd 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.75.0"
+const Version = "1.78.0"
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 669133d0..c96e4483 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -32,7 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
- f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
+ packed := false
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
@@ -108,7 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
- f.L1.EditionFeatures.IsPacked = true
+ packed = true
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
@@ -121,6 +121,13 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
tag = strings.TrimPrefix(tag[i:], ",")
}
+ // Update EditionFeatures after the loop and after we know whether this is
+ // a proto2 or proto3 field.
+ f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
+ if packed {
+ f.L1.EditionFeatures.IsPacked = true
+ }
+
// The generator uses the group message name instead of the field name.
// We obtain the real field name by lowercasing the group name.
if f.L1.Kind == protoreflect.GroupKind {
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 099b2bf4..9aa7a9bb 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -424,27 +424,34 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
}
-// parseTypeName parses Any type URL or extension field name. The name is
-// enclosed in [ and ] characters. The C++ parser does not handle many legal URL
-// strings. This implementation is more liberal and allows for the pattern
-// `^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed
-// in between [ ], '.', '/' and the sub names.
+// parseTypeName parses an Any type URL or an extension field name. The name is
+// enclosed in [ and ] characters. We allow almost arbitrary type URL prefixes,
+// closely following the text-format spec [1,2]. We implement "ExtensionName |
+// AnyName" as follows (with some exceptions for backwards compatibility):
+//
+// char = [-_a-zA-Z0-9]
+// url_char = char | [.~!$&'()*+,;=] | "%", hex, hex
+//
+// Ident = char, { char }
+// TypeName = Ident, { ".", Ident } ;
+// UrlPrefix = url_char, { url_char | "/" } ;
+// ExtensionName = "[", TypeName, "]" ;
+// AnyName = "[", UrlPrefix, "/", TypeName, "]" ;
+//
+// Additionally, we allow arbitrary whitespace and comments between [ and ].
+//
+// [1] https://protobuf.dev/reference/protobuf/textformat-spec/#characters
+// [2] https://protobuf.dev/reference/protobuf/textformat-spec/#field-names
func (d *Decoder) parseTypeName() (Token, error) {
- startPos := len(d.orig) - len(d.in)
// Use alias s to advance first in order to use d.in for error handling.
- // Caller already checks for [ as first character.
+ // Caller already checks for [ as first character (d.in[0] == '[').
s := consume(d.in[1:], 0)
if len(s) == 0 {
return Token{}, ErrUnexpectedEOF
}
+ // Collect everything between [ and ] in name.
var name []byte
- for len(s) > 0 && isTypeNameChar(s[0]) {
- name = append(name, s[0])
- s = s[1:]
- }
- s = consume(s, 0)
-
var closed bool
for len(s) > 0 && !closed {
switch {
@@ -452,23 +459,20 @@ func (d *Decoder) parseTypeName() (Token, error) {
s = s[1:]
closed = true
- case s[0] == '/', s[0] == '.':
- if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') {
- return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
- d.orig[startPos:len(d.orig)-len(s)+1])
- }
+ case s[0] == '/' || isTypeNameChar(s[0]) || isUrlExtraChar(s[0]):
name = append(name, s[0])
- s = s[1:]
- s = consume(s, 0)
- for len(s) > 0 && isTypeNameChar(s[0]) {
- name = append(name, s[0])
- s = s[1:]
+ s = consume(s[1:], 0)
+
+ // URL percent-encoded chars
+ case s[0] == '%':
+ if len(s) < 3 || !isHexChar(s[1]) || !isHexChar(s[2]) {
+ return Token{}, d.parseTypeNameError(s, 3)
}
- s = consume(s, 0)
+ name = append(name, s[0], s[1], s[2])
+ s = consume(s[3:], 0)
default:
- return Token{}, d.newSyntaxError(
- "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1])
+ return Token{}, d.parseTypeNameError(s, 1)
}
}
@@ -476,15 +480,38 @@ func (d *Decoder) parseTypeName() (Token, error) {
return Token{}, ErrUnexpectedEOF
}
- // First character cannot be '.'. Last character cannot be '.' or '/'.
- size := len(name)
- if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' {
- return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s",
- d.orig[startPos:len(d.orig)-len(s)])
+ // Split collected name on last '/' into urlPrefix and typeName (if '/' is
+ // present).
+ typeName := name
+ if i := bytes.LastIndexByte(name, '/'); i != -1 {
+ urlPrefix := name[:i]
+ typeName = name[i+1:]
+
+ // urlPrefix may be empty (for backwards compatibility).
+ // If non-empty, it must not start with '/'.
+ if len(urlPrefix) > 0 && urlPrefix[0] == '/' {
+ return Token{}, d.parseTypeNameError(s, 0)
+ }
}
+ // typeName must not be empty (note: "" splits to [""]) and all identifier
+ // parts must not be empty.
+ for _, ident := range bytes.Split(typeName, []byte{'.'}) {
+ if len(ident) == 0 {
+ return Token{}, d.parseTypeNameError(s, 0)
+ }
+ }
+
+ // typeName must not contain any percent-encoded or special URL chars.
+ for _, b := range typeName {
+ if b == '%' || (b != '.' && isUrlExtraChar(b)) {
+ return Token{}, d.parseTypeNameError(s, 0)
+ }
+ }
+
+ startPos := len(d.orig) - len(d.in)
+ endPos := len(d.orig) - len(s)
d.in = s
- endPos := len(d.orig) - len(d.in)
d.consume(0)
return Token{
@@ -496,16 +523,32 @@ func (d *Decoder) parseTypeName() (Token, error) {
}, nil
}
+func (d *Decoder) parseTypeNameError(s []byte, numUnconsumedChars int) error {
+ return d.newSyntaxError(
+ "invalid type URL/extension field name: %s",
+ d.in[:len(d.in)-len(s)+min(numUnconsumedChars, len(s))],
+ )
+}
+
+func isHexChar(b byte) bool {
+ return ('0' <= b && b <= '9') ||
+ ('a' <= b && b <= 'f') ||
+ ('A' <= b && b <= 'F')
+}
+
func isTypeNameChar(b byte) bool {
- return (b == '-' || b == '_' ||
+ return b == '-' || b == '_' ||
('0' <= b && b <= '9') ||
('a' <= b && b <= 'z') ||
- ('A' <= b && b <= 'Z'))
+ ('A' <= b && b <= 'Z')
}
-func isWhiteSpace(b byte) bool {
+// isUrlExtraChar complements isTypeNameChar with extra characters that we allow
+// in URLs but not in type names. Note that '/' is not included so that it can
+// be treated specially.
+func isUrlExtraChar(b byte) bool {
switch b {
- case ' ', '\n', '\r', '\t':
+ case '.', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=':
return true
default:
return false
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 688aabe4..c775e583 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -32,6 +32,7 @@ const (
EditionProto3 Edition = 999
Edition2023 Edition = 1000
Edition2024 Edition = 1001
+ EditionUnstable Edition = 9999
EditionUnsupported Edition = 100000
)
@@ -72,9 +73,10 @@ type (
EditionFeatures EditionFeatures
}
FileL2 struct {
- Options func() protoreflect.ProtoMessage
- Imports FileImports
- Locations SourceLocations
+ Options func() protoreflect.ProtoMessage
+ Imports FileImports
+ OptionImports func() protoreflect.FileImports
+ Locations SourceLocations
}
// EditionFeatures is a frequently-instantiated struct, so please take care
@@ -126,12 +128,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
func (fd *File) Parent() protoreflect.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
-
-// Not exported and just used to reconstruct the original FileDescriptor proto
-func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
-func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
-func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool { return false }
+func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
@@ -150,6 +149,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD
func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
+// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
+// the original FileDescriptor proto.
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) OptionImports() protoreflect.FileImports {
+ if f := fd.lazyInit().OptionImports; f != nil {
+ return f()
+ }
+ return emptyFiles
+}
+
func (fd *File) lazyInit() *FileL2 {
if atomic.LoadUint32(&fd.once) == 0 {
fd.lazyInitOnce()
@@ -182,9 +191,9 @@ type (
L2 *EnumL2 // protected by fileDesc.once
}
EnumL1 struct {
- eagerValues bool // controls whether EnumL2.Values is already populated
-
EditionFeatures EditionFeatures
+ Visibility int32
+ eagerValues bool // controls whether EnumL2.Values is already populated
}
EnumL2 struct {
Options func() protoreflect.ProtoMessage
@@ -219,6 +228,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit()
func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {}
+
+// This is not part of the EnumDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
+
func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
@@ -244,13 +258,13 @@ type (
L2 *MessageL2 // protected by fileDesc.once
}
MessageL1 struct {
- Enums Enums
- Messages Messages
- Extensions Extensions
- IsMapEntry bool // promoted from google.protobuf.MessageOptions
- IsMessageSet bool // promoted from google.protobuf.MessageOptions
-
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
EditionFeatures EditionFeatures
+ Visibility int32
+ IsMapEntry bool // promoted from google.protobuf.MessageOptions
+ IsMessageSet bool // promoted from google.protobuf.MessageOptions
}
MessageL2 struct {
Options func() protoreflect.ProtoMessage
@@ -319,6 +333,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L
func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
func (md *Message) ProtoType(protoreflect.MessageDescriptor) {}
func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+
+// This is not part of the MessageDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (md *Message) Visibility() int32 { return md.L1.Visibility }
+
func (md *Message) lazyInit() *MessageL2 {
md.L0.ParentFile.lazyInit() // implicitly initializes L2
return md.L2
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index d2f54949..e91860f5 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
case genid.EnumDescriptorProto_Value_field_number:
numValues++
}
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumDescriptorProto_Visibility_field_number:
+ ed.L1.Visibility = int32(v)
+ }
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
@@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
md.unmarshalSeedOptions(v)
}
prevField = num
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_Visibility_field_number:
+ md.L1.Visibility = int32(v)
+ }
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index d4c94458..78f02b1b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) {
var enumIdx, messageIdx, extensionIdx, serviceIdx int
var rawOptions []byte
+ var optionImports []string
fd.L2 = new(FileL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) {
imp = PlaceholderFile(path)
}
fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
+ case genid.FileDescriptorProto_OptionDependency_field_number:
+ optionImports = append(optionImports, sb.MakeString(v))
case genid.FileDescriptorProto_EnumType_field_number:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
@@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) {
}
}
fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+ if len(optionImports) > 0 {
+ var imps FileImports
+ var once sync.Once
+ fd.L2.OptionImports = func() protoreflect.FileImports {
+ once.Do(func() {
+ imps = make(FileImports, len(optionImports))
+ for i, path := range optionImports {
+ imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+ if imp == nil {
+ imp = PlaceholderFile(path)
+ }
+ imps[i] = protoreflect.FileImport{FileDescriptor: imp}
+ }
+ })
+ return &imps
+ }
+ }
}
func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
@@ -310,7 +330,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb)
extensionIdx++
case genid.DescriptorProto_Options_field_number:
- md.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -336,27 +355,6 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) {
md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions)
}
-func (md *Message) unmarshalOptions(b []byte) {
- for len(b) > 0 {
- num, typ, n := protowire.ConsumeTag(b)
- b = b[n:]
- switch typ {
- case protowire.VarintType:
- v, m := protowire.ConsumeVarint(b)
- b = b[m:]
- switch num {
- case genid.MessageOptions_MapEntry_field_number:
- md.L1.IsMapEntry = protowire.DecodeBool(v)
- case genid.MessageOptions_MessageSetWireFormat_field_number:
- md.L1.IsMessageSet = protowire.DecodeBool(v)
- }
- default:
- m := protowire.ConsumeFieldValue(num, typ, b)
- b = b[m:]
- }
- }
-}
-
func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) {
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 950a6a32..65aaf4d2 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -26,6 +26,7 @@ const (
Edition_EDITION_PROTO3_enum_value = 999
Edition_EDITION_2023_enum_value = 1000
Edition_EDITION_2024_enum_value = 1001
+ Edition_EDITION_UNSTABLE_enum_value = 9999
Edition_EDITION_1_TEST_ONLY_enum_value = 1
Edition_EDITION_2_TEST_ONLY_enum_value = 2
Edition_EDITION_99997_TEST_ONLY_enum_value = 99997
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 229c6980..4a3bf393 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -113,6 +113,9 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
}
func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if opts.depth--; opts.depth < 0 {
+ return out, errRecursionDepth
+ }
if wtyp != protowire.BytesType {
return out, errUnknown
}
@@ -170,6 +173,9 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo
}
func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if opts.depth--; opts.depth < 0 {
+ return out, errRecursionDepth
+ }
if wtyp != protowire.BytesType {
return out, errUnknown
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index e0dd21fa..1228b5c8 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -102,8 +102,7 @@ var errUnknown = errors.New("unknown")
func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
mi.init()
- opts.depth--
- if opts.depth < 0 {
+ if opts.depth--; opts.depth < 0 {
return out, errRecursionDepth
}
if flags.ProtoLegacy && mi.isMessageSet {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index 7b2995dd..99a1eb95 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -68,9 +68,13 @@ func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out pr
if in.Resolver == nil {
in.Resolver = protoregistry.GlobalTypes
}
+ if in.Depth == 0 {
+ in.Depth = protowire.DefaultRecursionLimit
+ }
o, st := mi.validate(in.Buf, 0, unmarshalOptions{
flags: in.Flags,
resolver: in.Resolver,
+ depth: in.Depth,
})
if o.initialized {
out.Flags |= protoiface.UnmarshalInitialized
@@ -257,6 +261,9 @@ func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmars
states[0].typ = validationTypeGroup
states[0].endGroup = groupTag
}
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
initialized := true
start := len(b)
State:
@@ -451,6 +458,13 @@ State:
mi: vi.mi,
tail: b,
})
+ if vi.typ == validationTypeMessage ||
+ vi.typ == validationTypeGroup ||
+ vi.typ == validationTypeMap {
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
+ }
b = v
continue State
case validationTypeRepeatedVarint:
@@ -499,6 +513,9 @@ State:
mi: vi.mi,
endGroup: num,
})
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
continue State
case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem:
typeid, v, n, err := messageset.ConsumeFieldValue(b, false)
@@ -521,6 +538,13 @@ State:
mi: xvi.mi,
tail: b[n:],
})
+ if xvi.typ == validationTypeMessage ||
+ xvi.typ == validationTypeGroup ||
+ xvi.typ == validationTypeMap {
+ if opts.depth--; opts.depth < 0 {
+ return out, ValidationInvalid
+ }
+ }
b = v
continue State
}
@@ -547,12 +571,14 @@ State:
switch st.typ {
case validationTypeMessage, validationTypeGroup:
numRequiredFields = int(st.mi.numRequiredFields)
+ opts.depth++
case validationTypeMap:
// If this is a map field with a message value that contains
// required fields, require that the value be present.
if st.mi != nil && st.mi.numRequiredFields > 0 {
numRequiredFields = 1
}
+ opts.depth++
}
// If there are more than 64 required fields, this check will
// always fail and we will report that the message is potentially
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 31e79a65..763fd828 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 9
+ Patch = 11
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 4cbf1aea..889d8511 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -121,9 +121,8 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
out, err = methods.Unmarshal(in)
} else {
- o.RecursionLimit--
- if o.RecursionLimit < 0 {
- return out, errors.New("exceeded max recursion depth")
+ if o.RecursionLimit--; o.RecursionLimit < 0 {
+ return out, errRecursionDepth
}
err = o.unmarshalMessageSlow(b, m)
}
@@ -220,6 +219,9 @@ func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m pro
}
func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) {
+ if o.RecursionLimit--; o.RecursionLimit < 0 {
+ return 0, errRecursionDepth
+ }
if wtyp != protowire.BytesType {
return 0, errUnknown
}
@@ -305,3 +307,5 @@ func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv proto
var errUnknown = errors.New("BUG: internal error (unknown)")
var errDecode = errors.New("cannot parse invalid wire-format data")
+
+var errRecursionDepth = errors.New("exceeded maximum recursion depth")
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 06d584c1..484c21fd 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -172,13 +172,14 @@ import (
// ) to obtain a formatter capable of generating timestamps in this format.
type Timestamp struct {
state protoimpl.MessageState `protogen:"open.v1"`
- // Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
- // 9999-12-31T23:59:59Z inclusive.
+ // Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must
+ // be between -315576000000 and 315576000000 inclusive (which corresponds to
+ // 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z).
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- // Non-negative fractions of a second at nanosecond resolution. Negative
- // second values with fractions must still have non-negative nanos values
- // that count forward in time. Must be from 0 to 999,999,999
+ // Non-negative fractions of a second at nanosecond resolution. This field is
+ // the nanosecond portion of the duration, not an alternative to seconds.
+ // Negative second values with fractions must still have non-negative nanos
+ // values that count forward in time. Must be between 0 and 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
unknownFields protoimpl.UnknownFields
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 76dd7af3..e2af59db 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -3,8 +3,8 @@
## explicit; go 1.24.0
connectrpc.com/connect
connectrpc.com/connect/internal/gen/connectext/grpc/status/v1
-# filippo.io/age v1.2.1
-## explicit; go 1.19
+# filippo.io/age v1.3.1
+## explicit; go 1.24.0
filippo.io/age
filippo.io/age/agessh
filippo.io/age/internal/bech32
@@ -14,9 +14,18 @@ filippo.io/age/internal/stream
## explicit; go 1.20
filippo.io/edwards25519
filippo.io/edwards25519/field
+# filippo.io/hpke v0.4.0
+## explicit; go 1.24.0
+filippo.io/hpke
+filippo.io/hpke/crypto
+filippo.io/hpke/crypto/ecdh
+filippo.io/hpke/internal/byteorder
# github.com/cenkalti/backoff/v5 v5.0.3
## explicit; go 1.23
github.com/cenkalti/backoff/v5
+# github.com/cespare/xxhash/v2 v2.3.0
+## explicit; go 1.11
+github.com/cespare/xxhash/v2
# github.com/containerd/nri v0.10.0
## explicit; go 1.24.3
github.com/containerd/nri/pkg/net
@@ -49,8 +58,8 @@ github.com/godbus/dbus/v5
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2
-## explicit; go 1.23.0
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7
+## explicit; go 1.24.0
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -65,8 +74,6 @@ github.com/inconshreveable/mousetrap
# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
## explicit
github.com/pmezard/go-difflib/difflib
-# github.com/rogpeppe/go-internal v1.13.1
-## explicit; go 1.22
# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
@@ -81,72 +88,77 @@ github.com/spf13/pflag
github.com/stretchr/testify/assert
github.com/stretchr/testify/assert/yaml
github.com/stretchr/testify/require
-# go.opentelemetry.io/auto/sdk v1.1.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/auto/sdk v1.2.1
+## explicit; go 1.24.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/otel v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/attribute/internal
+go.opentelemetry.io/otel/attribute/internal/xxhash
go.opentelemetry.io/otel/baggage
go.opentelemetry.io/otel/codes
go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
-go.opentelemetry.io/otel/semconv/v1.26.0
go.opentelemetry.io/otel/semconv/v1.37.0
-go.opentelemetry.io/otel/semconv/v1.37.0/otelconv
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0
-## explicit; go 1.23.0
+go.opentelemetry.io/otel/semconv/v1.39.0
+go.opentelemetry.io/otel/semconv/v1.39.0/otelconv
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace
go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/counter
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/observ
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.38.0
-## explicit; go 1.23.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/x
+# go.opentelemetry.io/otel/metric v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/sdk v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
-go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-go.opentelemetry.io/otel/sdk/trace/internal/x
+go.opentelemetry.io/otel/sdk/trace/internal/env
+go.opentelemetry.io/otel/sdk/trace/internal/observ
go.opentelemetry.io/otel/sdk/trace/tracetest
-# go.opentelemetry.io/otel/sdk/metric v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/sdk/metric v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/sdk/metric
go.opentelemetry.io/otel/sdk/metric/exemplar
go.opentelemetry.io/otel/sdk/metric/internal
go.opentelemetry.io/otel/sdk/metric/internal/aggregate
-go.opentelemetry.io/otel/sdk/metric/internal/x
+go.opentelemetry.io/otel/sdk/metric/internal/observ
+go.opentelemetry.io/otel/sdk/metric/internal/reservoir
go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/trace v1.40.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.7.1
+# go.opentelemetry.io/proto/otlp v1.9.0
## explicit; go 1.23.0
go.opentelemetry.io/proto/otlp/collector/metrics/v1
go.opentelemetry.io/proto/otlp/collector/trace/v1
@@ -154,8 +166,8 @@ go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/metrics/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
-# golang.org/x/crypto v0.41.0
-## explicit; go 1.23.0
+# golang.org/x/crypto v0.47.0
+## explicit; go 1.24.0
golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20
golang.org/x/crypto/chacha20poly1305
@@ -167,14 +179,14 @@ golang.org/x/crypto/pbkdf2
golang.org/x/crypto/scrypt
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/mod v0.26.0
-## explicit; go 1.23.0
+# golang.org/x/mod v0.31.0
+## explicit; go 1.24.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.43.0
-## explicit; go 1.23.0
+# golang.org/x/net v0.49.0
+## explicit; go 1.24.0
golang.org/x/net/http/httpguts
golang.org/x/net/http2
golang.org/x/net/http2/hpack
@@ -182,18 +194,18 @@ golang.org/x/net/idna
golang.org/x/net/internal/httpcommon
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
-# golang.org/x/sys v0.37.0
+# golang.org/x/sys v0.40.0
## explicit; go 1.24.0
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.34.0
-## explicit; go 1.23.0
+# golang.org/x/term v0.39.0
+## explicit; go 1.24.0
golang.org/x/term
-# golang.org/x/text v0.28.0
-## explicit; go 1.23.0
+# golang.org/x/text v0.33.0
+## explicit; go 1.24.0
golang.org/x/text/encoding
golang.org/x/text/encoding/internal
golang.org/x/text/encoding/internal/identifier
@@ -204,15 +216,15 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409
+## explicit; go 1.24.0
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409
+## explicit; go 1.24.0
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.75.0
-## explicit; go 1.23.0
+# google.golang.org/grpc v1.78.0
+## explicit; go 1.24.0
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
@@ -222,7 +234,6 @@ google.golang.org/grpc/balancer/endpointsharding
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/pickfirst
google.golang.org/grpc/balancer/pickfirst/internal
-google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -232,6 +243,7 @@ google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
+google.golang.org/grpc/encoding/internal
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
@@ -275,7 +287,7 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.9
+# google.golang.org/protobuf v1.36.11
## explicit; go 1.23
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
diff --git a/x/go.mod b/x/go.mod
index c7071da7..368f55a3 100644
--- a/x/go.mod
+++ b/x/go.mod
@@ -9,34 +9,35 @@ require (
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.9
github.com/stretchr/testify v1.11.1
- go.opentelemetry.io/otel v1.38.0
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0
- go.opentelemetry.io/otel/sdk v1.38.0
- go.opentelemetry.io/otel/sdk/metric v1.38.0
- golang.org/x/mod v0.26.0
- golang.org/x/sys v0.37.0
- google.golang.org/protobuf v1.36.9
+ go.opentelemetry.io/otel v1.40.0
+ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0
+ go.opentelemetry.io/otel/sdk v1.40.0
+ go.opentelemetry.io/otel/sdk/metric v1.40.0
+ golang.org/x/mod v0.31.0
+ golang.org/x/sys v0.40.0
+ google.golang.org/protobuf v1.36.11
)
require (
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
- go.opentelemetry.io/proto/otlp v1.7.1 // indirect
- golang.org/x/net v0.43.0 // indirect
- golang.org/x/text v0.28.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect
- google.golang.org/grpc v1.75.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect
+ go.opentelemetry.io/otel/metric v1.40.0 // indirect
+ go.opentelemetry.io/otel/trace v1.40.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.9.0 // indirect
+ golang.org/x/net v0.49.0 // indirect
+ golang.org/x/text v0.33.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect
+ google.golang.org/grpc v1.78.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/x/go.sum b/x/go.sum
index f3a9083b..6dd1cc9b 100644
--- a/x/go.sum
+++ b/x/go.sum
@@ -2,6 +2,8 @@ connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14=
connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -18,8 +20,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=
github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -31,8 +33,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -44,47 +46,47 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
-go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
-go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
-go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
-go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
-go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
+go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
+go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs=
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
+go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
+go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
+go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
+go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
+go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
+go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
+go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
-golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY=
-google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc=
-google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4=
-google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
-google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
-google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M=
+google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=